code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#dependencies
import warnings
warnings.filterwarnings("ignore")
!pip install contractions
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('words')
import re
import pickle
import numpy as np
import pandas as pd
import contractions
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, Conv1D, MaxPooling1D, Dropout, BatchNormalization
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
import gensim
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from tensorflow.keras.models import Model, load_model
#from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, EarlyStopping, ModelCheckpoint
from kaggle_datasets import KaggleDatasets
import transformers
from transformers import TFAutoModel, AutoTokenizer
from tqdm.notebook import tqdm
#from tokenizers import Tokenizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.optimizers import Adam
import tensorflow.keras
# Load the Kaggle fake-news train/test splits; column 0 ("id") becomes the index.
df_2 = pd.read_csv("/kaggle/input/fake-news/train.csv", header=0, index_col=0)
df_t = pd.read_csv("/kaggle/input/fake-news/test.csv", header=0, index_col=0)
# Only the article body is used by the model; drop title/author.
df_2 = df_2.drop(['title','author'], axis = 1)
df_t = df_t.drop(['title','author'], axis = 1)
# Training rows with missing text are dropped; test rows must be kept
# (Kaggle scoring needs a prediction for every id), so fill them with ''.
df_2.dropna(inplace = True)
df_t.fillna('',inplace = True)
#print(df_2.isnull().sum(axis = 0))
def clean_text(text_col):
    """Normalize a pandas Series of raw article strings into token lists.

    Per document: expand contractions and lowercase each whitespace token,
    strip punctuation characters, drop English stopwords, then drop any
    token still containing special characters or digits.
    """
    stop_words = set(stopwords.words('english'))
    # Compile once instead of re-parsing the patterns for every token.
    punct_re = re.compile(r'[^\w\s]')
    special_re = re.compile(r"[@_!#$%^&*()<>?/|}{~:0-9]")

    def _normalize(doc):
        tokens = [contractions.fix(tok, slang=False).lower() for tok in doc.split()]
        tokens = [punct_re.sub('', tok) for tok in tokens]
        tokens = [tok for tok in tokens if tok not in stop_words]
        return [tok for tok in tokens if special_re.search(tok) is None]

    return text_col.apply(_normalize)
df_2["text"] = clean_text(df_2["text"])
df_t["text"] = clean_text(df_t["text"])
# Force labels to int (0 = reliable, 1 = unreliable per the competition page).
df_2['label'] = df_2['label'].apply(lambda x: int(x))
y = df_2['label']
#y = df_2["label"]
#lemmatizing
wordnet_lemmatizer = WordNetLemmatizer()
x = []
x_test = []
# Restrict the vocabulary to the NLTK English word list.
english_words = set(nltk.corpus.words.words())
# Noun-lemmatize every kept token; x / x_test are lists of token lists.
for words in df_2['text']:
    tmp = []
    fil_wor = [wordnet_lemmatizer.lemmatize(word, 'n') for word in words if word in english_words]
    tmp.extend(fil_wor)
    x.append(tmp)
for words in df_t['text']:
    tmp = []
    fil_wor = [wordnet_lemmatizer.lemmatize(word, 'n') for word in words if word in english_words]
    tmp.extend(fil_wor)
    x_test.append(tmp)
df_2["text"] = x
df_t["text"] = x_test
#creating word embedding
# Train Word2Vec on train + test tokens combined.
# BUG FIX: the original loop used `y` as its index variable
# (`for y in range(len(x_test))`), clobbering the label Series `y`
# defined above, which train_test_split later depends on. Plain list
# concatenation builds the same corpus without touching `y`.
x_all = x + x_test
#n of vectors we are generating
EMBEDDING_DIM = 100
#Creating Word Vectors by Word2Vec Method (takes time...)
# min_count=1 keeps every corpus word, so the Tokenizer vocabulary below
# is guaranteed to be a subset of the Word2Vec vocabulary.
w2v_model = gensim.models.Word2Vec(sentences=x_all, vector_size=EMBEDDING_DIM, window=5, min_count=1)
print(len(w2v_model.wv))
#testing a word embedding
w2v_model.wv["liberty"]
#similarity between words
word = 'people'
w2v_model.wv.most_similar(word)
#tokenizing
# Fit on the training corpus only; test sequences reuse the same mapping.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(x)
x = tokenizer.texts_to_sequences(x)
x_test = tokenizer.texts_to_sequences(x_test)
print(x[0][:10])
# word_index maps word -> integer id (1-based; 0 is reserved for padding).
word_index = tokenizer.word_index
for word, num in word_index.items():
    print(f"{word} -> {num}")
    if num == 10:
        break
#padding
# Pad/truncate every document to a fixed length for the Embedding layer.
maxlen = 700
x = pad_sequences(x, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
# Function to create weight matrix from word2vec gensim model
def get_weight_matrix(model, vocab, embedding_dim=None):
    """Build an embedding weight matrix for a Keras Embedding layer.

    Parameters
    ----------
    model : mapping word -> vector (e.g. gensim KeyedVectors)
        Source of the pretrained word vectors.
    vocab : dict word -> int
        Tokenizer.word_index mapping (indices are 1-based; every word is
        assumed present in `model`, which holds here since Word2Vec was
        trained with min_count=1 on a superset of the tokenizer corpus).
    embedding_dim : int, optional
        Vector dimensionality; defaults to the module-level EMBEDDING_DIM
        so existing callers are unaffected.

    Returns
    -------
    (weight_matrix, vocab_size) : (np.ndarray of shape (vocab_size, dim), int)
    """
    if embedding_dim is None:
        embedding_dim = EMBEDDING_DIM
    # total vocabulary size plus 0 for unknown/padding words
    vocab_size = len(vocab) + 1
    # define weight matrix dimensions with all 0 (row 0 stays zero = padding)
    weight_matrix = np.zeros((vocab_size, embedding_dim))
    # step vocab, store vectors using the Tokenizer's integer mapping
    for word, i in vocab.items():
        weight_matrix[i] = model[word]
    return weight_matrix, vocab_size
#Getting embedding vectors from word2vec and using them as weights of a non-trainable keras embedding layer
embedding_vectors, vocab_size = get_weight_matrix(w2v_model.wv, word_index)
#Defining Neural Network
model = Sequential()
#Non-trainable embedding layer (frozen Word2Vec vectors)
model.add(Embedding(vocab_size, output_dim=EMBEDDING_DIM, weights=[embedding_vectors], input_length=maxlen, trainable=False))
#LSTM
model.add(Dropout(0.2))
#model.add(Conv1D(filters=32, kernel_size=5, padding='same', activation='relu'))
#model.add(MaxPooling1D(pool_size=2))
# Conv + pooling compress the 700-step sequence before the stacked LSTMs.
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(units=128,dropout=0.2, return_sequences=True))
model.add(BatchNormalization())
model.add(LSTM(units=128,dropout=0.2))
model.add(BatchNormalization())
#model.add(Dropout(0.2))
# Single sigmoid unit -> binary fake/real probability.
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.summary()
# NOTE(review): this relies on `y` still being the label Series; the cell
# above originally reused `y` as a loop index — verify before running.
X_train, X_test, y_train, y_test = train_test_split(x, y)
model.fit(X_train, y_train, validation_data= (X_test,y_test), epochs=50)
#validation_data_performance evaluation (0.5 probability threshold)
y_pred = (model.predict(X_test) >= 0.5).astype("int")
print(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(10,7))
sns.heatmap(cm, annot=True)
plt.xlabel('Predicted')
plt.ylabel('Truth')
#test_data_for_scoring_on_kaggle
y_t = (model.predict(x_test) >= 0.5).astype("int")
result = pd.DataFrame({"id" :df_t.index, "label":y_t.squeeze() }, index = None )
result.to_csv("result_rnn.csv",index = False)
#let's include an attention layer in our model
class Attention(tf.keras.Model):
    """Additive (Bahdanau-style) attention over per-timestep features.

    `features` are the LSTM outputs for every timestep; `hidden` is the
    final hidden state used as the query vector.
    """
    def __init__(self, units):
        super(Attention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)
    def call(self, features, hidden):
        # (batch, units) -> (batch, 1, units) so it broadcasts over time
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # additive alignment score for every timestep
        score = tf.nn.tanh(self.W1(features)+ self.W2(hidden_with_time_axis))
        # normalise the scores over the time axis (axis=1)
        attention_weights = tf.nn.softmax(self.V(score),axis = 1)
        # attention-weighted sum of the features = context vector
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis = 1)
        return context_vector, attention_weights
'''#dd attention layer to the deep learning network
class Attention(Layer):
def __init__(self,**kwargs):
super(Attention,self).__init__(**kwargs)
def build(self,input_shape):
self.W=self.add_weight(name='attention_weight', #shape=(input_shape[-1],1),
initializer='random_normal', trainable=True)
self.b=self.add_weight(name='attention_bias', #shape=(input_shape[1],1),
initializer='zeros', trainable=True)
super(Attention, self).build(input_shape)
def call(self,x):
# Alignment scores. Pass them through tanh function
e = K.tanh(K.dot(x,self.W)+self.b)
# Remove dimension of size 1
e = K.squeeze(e, axis=-1)
# Compute the weights
alpha = K.softmax(e)
# Reshape to tensorFlow format
alpha = K.expand_dims(alpha, axis=-1)
# Compute the context vector
context = x * alpha
context = K.sum(context, axis=1)
return context'''
#RNN with Attention model (functional API, same front-end as the Sequential model)
sequence_input = Input(shape = (maxlen,), dtype = "int32")
embedding = Embedding(vocab_size, output_dim=EMBEDDING_DIM, weights=[embedding_vectors], input_length=maxlen, trainable=False)(sequence_input)
dropout = Dropout(0.2)(embedding)
conv1 = Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')(dropout)
maxp = MaxPooling1D(pool_size=2)(conv1)
#(lstm, state_h, state_c) = LSTM(units=128,return_sequences=True,dropout=0.2, return_state= True)(maxp)
#bn1 = BatchNormalization()((lstm, state_h, state_c))
# return_state=True exposes the final hidden state, used as the attention query.
(lstm, state_h, state_c) = LSTM(units=128,dropout=0.2, return_sequences=True, return_state= True)(maxp)
context_vector, attention_weights = Attention(10)(lstm, state_h)
densee = Dense(20, activation='relu')(context_vector)
#bn = BatchNormalization()(densee)
dropout2 = Dropout(0.2)(densee)
densef = Dense(1, activation='sigmoid')(dropout2)
model = tensorflow.keras.Model(inputs = sequence_input, outputs = densef)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
display(model.summary())
# Reuses the train/validation split from the previous model for comparability.
model.fit(X_train, y_train, validation_data= (X_test,y_test), epochs=50)
#checking accuracy on validation data
y_pred = (model.predict(X_test) >= 0.5).astype("int")
print(accuracy_score(y_test, y_pred))
y_t = (model.predict(x_test) >= 0.5).astype("int")
result = pd.DataFrame({"id" :df_t.index, "label":y_t.squeeze() }, index = None )
result.to_csv("result_rnnattenion.csv",index = False)
```
| github_jupyter |
# Gaussian mixture model
The model is prototyped with TensorFlow Probability and inference is performed with variational Bayes by stochastic gradient descent.
Details on [Wikipedia](https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model).
Some code is borrowed from
[Brendan Hasz](https://brendanhasz.github.io/2019/06/12/tfp-gmm.html) and
[TensorFlow Probability examples](https://www.tensorflow.org/probability/overview)
Author: Yuanhua Huang
Date: 29/01/2020
#### Definition of likelihood
Below is the definition of the likelihood, obtained by introducing the latent variable Z for the sample assignment identity. Namely, Z is a Categorical distribution (a special case of the multinomial with total_counts=1), and the prior $P(z_i=k)$ can be specified per data point or shared by the whole data set:
$$ \mathcal{L} = P(X | \mu, \sigma, Z) = \prod_{i=1}^N \prod_{j=1}^D \prod_{k=1}^K P(z_i=k) \{ \mathcal{N}(x_{ij}|\mu_{k,j}, \sigma_{k,j}) \}^{z_i=k}$$
The evidence lower bound (ELBO) can be written as
$$\mathtt{L}=\mathtt{KL}(q(Y)||p(Y)) - \int{q(Y)\log{p(X|Y)}dY}$$
where $Y$ denotes the set of all unknown variables and $X$ denotes the observed data points. The derivations can be found in page 463 in [Bishop, PRML 2006](https://www.springer.com/gp/book/9780387310732).
**Note**, this implementation is mainly a tutorial example and hasn't been optimised; for example, it lacks multiple initialisations to avoid local optima caused by a poor initialisation.
**Also**, the assignment variable $z$ can be marginalised; that implementation can be found in
[GaussianMixture_VB.ipynb](https://github.com/huangyh09/TensorFlow-Bayes/blob/master/examples/GaussianMixture_VB.ipynb).
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
# Random seed
np.random.seed(1)
tf.random.set_seed(1)
```
## Generate data
```
# Generate some data: three unit-variance Gaussian clusters of 1000 points
# each, shifted to distinct centres.
np.random.seed(0)
N = 3000
X = np.random.randn(N, 2).astype('float32')
X[:1000, :] += [2, 0]
X[1000:2000, :] -= [2, 4]
X[2000:, :] += [-2, 4]
# Plot the data
plt.plot(X[:, 0], X[:, 1], '.')
plt.axis('equal')
plt.show()
```
## Define model
```
from TFBayes.mixture.Gaussian_MM_full import GaussianMixture
# 3 components, 2 dimensions, 3000 data points (matches the data above).
model = GaussianMixture(3, 2, 3000)
# model.set_prior(theta_prior = tfd.Dirichlet(5 * tf.ones((3, ))))
model.KLsum
# model.alpha, model.beta, model.gamma
# losses = model.fit(X, sampling=False, learn_rate=0.03, num_steps=500)
# Fit by stochastic-gradient variational Bayes, sampling the posteriors.
losses = model.fit(X, sampling=True, learn_rate=0.02, num_steps=500, n_sample=10)
plt.plot(losses)
plt.show()
# Compute log likelihood at each point on a grid
Np = 100 #number of grid points
Xp, Yp = np.meshgrid(np.linspace(-6,6,Np), np.linspace(-6,6,Np))
Pp = np.column_stack([Xp.flatten(), Yp.flatten()])
# use_ident=False marginalises over components instead of using the
# per-sample assignments (grid points are not training samples).
Z = model.logLik(Pp.astype('float32'), sampling=False, use_ident=False)
Z = np.reshape(Z, (Np, Np))
# Show the fit mixture density
plt.imshow(np.exp(Z),
           extent=(-6, 6, -6, 6),
           origin='lower')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Likelihood')
```
## Model
The codes below is also included in [TFBayes.mixture.Gaussian_MM_full.py](https://github.com/huangyh09/TensorFlow-Bayes/blob/master/TFBayes/mixture/Gaussian_MM_full.py).
```
class GaussianMixture():
    """A Bayesian Gaussian mixture model.

    Assumes Gaussians' variances in each dimension are independent.

    Parameters
    ----------
    Nc : int > 0
        Number of mixture components.
    Nd : int > 0
        Number of dimensions.
    Ns : int > 0
        Number of data points.
    """
    def __init__(self, Nc, Nd, Ns=0):
        # Initialize
        self.Nc = Nc
        self.Nd = Nd
        self.Ns = Ns
        # Variational distribution variables for means
        self.locs = tf.Variable(tf.random.normal((Nc, Nd)))
        self.scales = tf.Variable(tf.pow(tf.random.gamma((Nc, Nd), 5, 5), -0.5))
        # Variational distribution variables for standard deviations
        self.alpha = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
        self.beta = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
        # Variational distribution variables for assignment logit
        self.gamma = tf.Variable(tf.random.uniform((Ns, Nc), -2, 2))
        self.set_prior()

    def set_prior(self, mu_prior=None, sigma_prior=None, ident_prior=None):
        """Set prior distributions (defaults used for any argument left None)."""
        # Prior distributions for the means
        if mu_prior is None:
            self.mu_prior = tfd.Normal(tf.zeros((self.Nc, self.Nd)),
                                       tf.ones((self.Nc, self.Nd)))
        else:
            # BUG FIX: was `self.mu_prior = self.mu_prior`, which silently
            # ignored a user-supplied prior; store the argument instead.
            self.mu_prior = mu_prior
        # Prior distributions for the standard deviations
        if sigma_prior is None:
            self.sigma_prior = tfd.Gamma(2 * tf.ones((self.Nc, self.Nd)),
                                         2 * tf.ones((self.Nc, self.Nd)))
        else:
            self.sigma_prior = sigma_prior
        # Prior distributions for sample assignment
        if ident_prior is None:
            self.ident_prior = tfd.Multinomial(total_count=1,
                                               probs=tf.ones((self.Ns, self.Nc))/self.Nc)
        else:
            self.ident_prior = ident_prior

    @property
    def mu(self):
        """Variational posterior for distribution mean"""
        return tfd.Normal(self.locs, self.scales)

    @property
    def sigma(self):
        """Variational posterior for distribution variance"""
        return tfd.Gamma(self.alpha, self.beta)
        # return tfd.Gamma(tf.math.exp(self.alpha), tf.math.exp(self.beta))

    @property
    def ident(self):
        """Variational posterior for the sample assignment (softmax of gamma)."""
        return tfd.Multinomial(total_count=1,
                               probs=tf.math.softmax(self.gamma))

    @property
    def KLsum(self):
        """
        Sum of KL divergences between posteriors and priors.
        The KL divergence for the multinomial distribution is defined manually.
        """
        kl_mu = tf.reduce_sum(tfd.kl_divergence(self.mu, self.mu_prior))
        kl_sigma = tf.reduce_sum(tfd.kl_divergence(self.sigma, self.sigma_prior))
        kl_ident = tf.reduce_sum(self.ident.mean() *
                                 tf.math.log(self.ident.mean() /
                                             self.ident_prior.mean()))  # axis=0
        return kl_mu + kl_sigma + kl_ident

    def logLik(self, x, sampling=False, n_sample=10, use_ident=True):
        """Compute log likelihood given a batch of data.

        Parameters
        ----------
        x : tf.Tensor, (n_points, n_dimension)
            A batch of data
        sampling : bool
            Whether to sample from the variational posterior
            distributions (if True), or just use the mean of the
            variational distributions (if False, the default).
        n_sample : int
            The number of posterior samples to draw when sampling=True
        use_ident : bool
            Setting True for fitting the model and False for testing logLik

        Returns
        -------
        log_likelihoods : tf.Tensor
            Log likelihood for each sample
        """
        # TODO: sampling doesn't converge well in the example data set
        Nb, Nd = x.shape
        x = tf.reshape(x, (1, Nb, 1, Nd))  # (n_sample, Ns, Nc, Nd)
        # Sample from the variational distributions
        if sampling:
            _mu = self.mu.sample((n_sample, 1))
            _sigma = tf.pow(self.sigma.sample((n_sample, 1)), -0.5)
        else:
            _mu = tf.reshape(self.mu.mean(), (1, 1, self.Nc, self.Nd))
            _sigma = tf.pow(tf.reshape(self.sigma.mean(),
                                       (1, 1, self.Nc, self.Nd)), -0.5)
        # Calculate the probability density per point/component/dimension
        _model = tfd.Normal(_mu, _sigma)
        _log_lik_mix = _model.log_prob(x)
        if use_ident:
            # Weight by the per-sample soft assignments (training path)
            _ident = tf.reshape(self.ident.mean(), (1, self.Ns, self.Nc, 1))
            _log_lik_mix = _log_lik_mix * _ident
            log_likelihoods = tf.reduce_sum(_log_lik_mix, axis=[0, 2, 3])
        else:
            # Marginalise components with the average mixture fractions
            _fract = tf.reshape(tf.reduce_mean(self.ident.mean(), axis=0),
                                (1, 1, self.Nc, 1))
            _log_lik_mix = _log_lik_mix + tf.math.log(_fract)
            log_likelihoods = tf.reduce_mean(tf.math.reduce_logsumexp(
                tf.reduce_sum(_log_lik_mix, axis=3), axis=2), axis=0)
        return log_likelihoods

    def fit(self, x, num_steps=200,
            optimizer=None, learn_rate=0.05, **kwargs):
        """Fit the model's parameters by minimizing the negative ELBO."""
        if optimizer is None:
            optimizer = tf.optimizers.Adam(learning_rate=learn_rate)
        loss_fn = lambda: (self.KLsum -
                           tf.reduce_sum(self.logLik(x, **kwargs)))
        losses = tfp.math.minimize(loss_fn,
                                   num_steps=num_steps,
                                   optimizer=optimizer)
        return losses
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import pandas as pd
import seaborn as sns
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn import neighbors, datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from scipy.spatial import ConvexHull
from tqdm import tqdm
import random
plt.style.use('ggplot')
import pickle
from sklearn import tree
from sklearn.tree import export_graphviz
from joblib import dump, load
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from scipy.interpolate import interp1d
%matplotlib inline
def getAuc(X,y,test_size=0.25,max_depth=None,n_estimators=100,
           minsplit=4,FPR=None,TPR=None,VERBOSE=False, USE_ONLY=None):
    '''
    get AUC given training data X, with target labels y.

    Trains a battery of classifiers on one random train/test split, pools
    the (FPR, TPR) points of all their ROC curves, and returns the AUC of
    the convex hull of those points along with the fitted classifiers.

    USE_ONLY : int, list of int, or None
        Restrict the battery to the classifier(s) at these indices.
    FPR, TPR : optional arrays of extra ROC points to pool in.
    '''
    # Avoid the mutable-default-argument pitfall of the original signature
    # (FPR=[], TPR=[]); behavior for existing callers is unchanged.
    if FPR is None:
        FPR = []
    if TPR is None:
        TPR = []
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    CLASSIFIERS=[DecisionTreeClassifier(max_depth=max_depth, min_samples_split=minsplit,class_weight='balanced'),
                 RandomForestClassifier(n_estimators=n_estimators,
                                        max_depth=max_depth,min_samples_split=minsplit,class_weight='balanced'),
                 ExtraTreesClassifier(n_estimators=n_estimators,
                                      max_depth=max_depth,min_samples_split=minsplit,class_weight='balanced'),
                 AdaBoostClassifier(n_estimators=n_estimators),
                 GradientBoostingClassifier(n_estimators=n_estimators,max_depth=max_depth),
                 svm.SVC(kernel='rbf',gamma='scale',class_weight='balanced',probability=True)]
    if USE_ONLY is not None:
        if isinstance(USE_ONLY, (list,)):
            CLASSIFIERS=[CLASSIFIERS[i] for i in USE_ONLY]
        if isinstance(USE_ONLY, (int,)):
            # BUG FIX: keep a list; the original assigned the bare estimator,
            # which the loop below would then iterate over incorrectly.
            CLASSIFIERS=[CLASSIFIERS[USE_ONLY]]
    for clf in CLASSIFIERS:
        clf.fit(X_train,y_train)
        y_pred=clf.predict_proba(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred[:,1], pos_label=1)
        auc=metrics.auc(fpr, tpr)
        if VERBOSE:
            print(auc)
        # Pool the ROC points of every classifier
        FPR=np.append(FPR,fpr)
        TPR=np.append(TPR,tpr)
    # AUC of the convex hull of all pooled ROC points, sorted by FPR
    points=np.array([[a[0],a[1]] for a in zip(FPR,TPR)])
    hull = ConvexHull(points)
    x=np.argsort(points[hull.vertices,:][:,0])
    auc=metrics.auc(points[hull.vertices,:][x,0],points[hull.vertices,:][x,1])
    return auc,CLASSIFIERS
def saveFIG(filename='tmp.pdf',AXIS=False):
    '''
    save fig for publication

    Tightens all margins on the current figure and writes it to
    `filename` (format inferred from the extension). When AXIS is
    False the tick locators are removed so no axes are drawn.
    '''
    import pylab as plt
    # Remove all surrounding whitespace around the axes
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
                        hspace = 0, wspace = 0)
    plt.margins(0,0)
    if not AXIS:
        # Strip tick marks/labels for a borderless image
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.savefig(filename,dpi=300, bbox_inches = 'tight',
                pad_inches = 0,transparent=False)
    return
# Diagnosis-feature baseline: column 'DX' is the class label, the rest
# are features; rows with DX <= 0 are excluded.
df=pd.read_csv('psychoByDiag.csv',index_col=0,sep=',')
df=df[df['DX']>0]
#df=df[df.DX.between(1,2)]
X=df.iloc[:,1:].values
y=df.iloc[:,0].values.astype(str)
# Binarize: class '1' vs everything else
y=[(x=='1')+0 for x in y]
Xdiag=X
ACC=[]
CLFdiag=None
# 500 random splits with small ExtraTrees models (USE_ONLY=[2]);
# keep the classifiers of any run clearing AUC 0.85.
for run in tqdm(np.arange(500)):
    auc,CLFS=getAuc(X,y,test_size=0.2,max_depth=3,n_estimators=2,
                    minsplit=2,VERBOSE=False, USE_ONLY=[2])
    ACC=np.append(ACC,auc)
    if auc > 0.85:
        CLFdiag=CLFS
sns.distplot(ACC)
np.median(ACC)
# PSYCHO.DAT is whitespace-delimited; use a raw string for the regex
# separator — a bare '\s+' is an invalid escape sequence (SyntaxWarning
# in recent Python 3, error in the future).
df=pd.read_csv('PSYCHO.DAT',header=None,index_col=0,sep=r'\s+')
df=df[df[1]>0]
#df=df[df[1].between(1,2)]
# Column 1 is the label; columns 2+ are the questionnaire features.
X=df.loc[:,2:].values
#y=df.loc[:,1].values.astype(str)
y=(df.loc[:,1]==1)+0
Xpsy=X
# NOTE(review): absolute home-directory path — not portable; verify.
df=pd.read_csv('/home/ishanu/Dropbox/scratch_/Qfeatures.csv')
df=df[df.labels>0]
#df=df[df.labels.between(1,2)]
Xq=df.drop('labels',axis=1).values
#y=df.labels.values.astype(str)
# Combine questionnaire features with Q-features column-wise.
X=np.c_[Xpsy,Xq]
#X=np.c_[X,Xdiag]
#X=np.c_[Xpsy,Xdiag]
#X=X1
#X=np.c_[Xpsy,Xdiag]
y=(df.labels==1)+0
X.shape
qACC=[]
# Map AUC -> fitted classifier list for every run that clears 0.8.
CLF={}
for run in tqdm(np.arange(2000)):
    auc,CLFS=getAuc(X,y,test_size=0.6,max_depth=10,n_estimators=2,
                    minsplit=2,VERBOSE=False, USE_ONLY=[2])
    qACC=np.append(qACC,auc)
    if auc > 0.8:
        CLF[auc]=CLFS
        #print('.')
# Compare AUC distributions with and without the Q-features.
ax=sns.distplot(ACC,label='noq')
sns.distplot(qACC,ax=ax,label='Q')
ax.legend()
np.median(qACC)
CLF
# Best-scoring run's first classifier becomes the reference model.
CLFstar=CLF[np.array([k for k in CLF.keys()]).max()][0]
auc_=[]
ROC={}
# Common FPR grid so ROC curves from different splits can be averaged.
fpr_ = np.linspace(0, 1, num=20, endpoint=True)
for run in np.arange(1000):
    clf=CLFstar
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    y_pred=clf.predict_proba(X_test)
    fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred[:,1], pos_label=1)
    # Interpolate each curve onto the shared FPR grid.
    f = interp1d(fpr, tpr)
    auc_=np.append(auc_,metrics.auc(fpr_, f(fpr_)))
    ROC[metrics.auc(fpr, tpr)]={'fpr':fpr_,'tpr':f(fpr_)}
sns.distplot(auc_)
auc_.mean()
TPR=[]
plt.figure(figsize=[6,5])
# Overlay all resampled ROC curves, then the median curve in red.
for a in ROC.keys():
    #print(a)
    #break
    plt.plot(ROC[a]['fpr'],ROC[a]['tpr'],'-k',alpha=.05)
    TPR=np.append(TPR,ROC[a]['tpr'])
# One row per resampled curve, one column per FPR grid point.
TPR=TPR.reshape(int(len(TPR)/len(fpr_)),len(fpr_))
plt.plot(fpr_,np.median(TPR,axis=0),'-r')
metrics.auc(fpr_,np.median(TPR,axis=0))
FS=18
#plt.gca().set_title('schizophrenia vs others',fontsize=18,y=1.02)
# NOTE(review): the annotated AUC (85.7%) is hard-coded — verify it
# matches the computed median-curve AUC above.
plt.text(.6,.25,'AUC: '+str(85.7)+'%',color='r',fontsize=FS)
#plt.text(.6,.25,'AUC: '+str(metrics.auc(fpr_,np.median(TPR,axis=0)))[:5],color='r')
#plt.text(.6,.31,'AUC: '+str(metrics.auc(fpr_,np.median(tprA,axis=0)))[:5],color='b')
#plt.text(.6,.19,'AUC: '+str(metrics.auc(fpr_,np.median(tprB,axis=0)))[:5],color='g')
FS=18
plt.gca().set_ylabel('sensitivity',fontsize=FS,labelpad=10,color='.5')
plt.gca().set_xlabel('1-specificity',fontsize=FS,labelpad=10,color='.5')
plt.gca().tick_params(axis='x', labelsize=FS,labelcolor='.5' )
plt.gca().tick_params(axis='y', labelsize=FS ,labelcolor='.5')
saveFIG('sczVSall.pdf',AXIS=True)
# confidence bound calculations
from scipy import interpolate
import subprocess
from sklearn import metrics
# Dense FPR grid for the external confidence-bound tool.
xnew = np.arange(0.01, 1, 0.01)
Y=[]
for a in ROC.keys():
    #print(a)
    #break
    x=ROC[a]['fpr']
    y=ROC[a]['tpr']
    f = interpolate.interp1d(x, y)
    ynew = f(xnew)
    Y=np.append(Y,ynew)
    #plt.plot(x, y, 'o', xnew, ynew, '-')
    #break
# Sample 20 resampled curves; rows = FPR grid, columns = curves.
Y=pd.DataFrame(Y.reshape(int(len(Y)/len(xnew)),len(xnew))).sample(20).transpose()
Y.to_csv('Y.csv',index=None,header=None,sep=' ')
T=0.99
# External binary computing confidence bounds on the curve family.
# NOTE(review): shell=True with string concatenation — fine for this
# fixed local command, but do not pass untrusted input through it.
CNFBD="~/ZED/Research/data_science_/bin/cnfbd "
subprocess.call(CNFBD+" -N 5 -f Y.csv -a "+str(T)+" > Y.dat ", shell=True)
# lb/mn/ub = lower bound, mean, upper bound of TPR at each FPR.
Yb=pd.read_csv('Y.dat',header=None,sep=' ',names=['lb','mn','ub'])
Yb['fpr']=xnew
Yb.head()
BND=[metrics.auc(Yb.fpr, Yb.lb),metrics.auc(Yb.fpr, Yb.mn),metrics.auc(Yb.fpr, Yb.ub)]
BND
print(T, '% cnfbnd', BND[0],BND[2], ' mean:', BND[1])
! ~/ZED/Research/data_science_/bin/cnfbd -h
def pickleModel(models,threshold=0.87,filename='model.pkl',verbose=True):
    '''
    save trained model set

    models : dict mapping AUC -> list of fitted classifiers (as built in
        the training loop above); only entries with AUC >= threshold are
        kept. The flattened list is dumped to `filename` via joblib and
        also returned.
    '''
    MODELS=[]
    for key,mds in models.items():
        if key >= threshold:
            mds_=mds
            MODELS.extend(mds_)
    if verbose:
        print("number of models (tests):", len(MODELS))
        # Fraction of distinct items (questions) used across all kept models
        FS=getCoverage(MODELS,verbose=True)
        print("Item Use Fraction:", FS.size/(len(MODELS)+0.0))
    dump(MODELS, filename)
    return MODELS
def loadModel(filename):
    '''
    load models

    Inverse of pickleModel: returns the joblib-dumped list of classifiers.
    '''
    return load(filename)
def drawTrees(model):
    '''
    draw the estimators (trees)
    in a single model

    Exports each tree to Graphviz .dot, renders it to PSYtree<i>.png via
    the `dot` binary, and displays it inline in the notebook.
    '''
    from subprocess import call
    from IPython.display import Image, display
    N=len(model.estimators_)
    for count in range(N):
        estimator = model.estimators_[count]
        export_graphviz(estimator, out_file='PSYtree.dot',
                        #feature_names = iris.feature_names,
                        #class_names = iris.target_names,
                        rounded = True, proportion = False,
                        precision = 2, filled = True)
        # Render the .dot file to PNG (requires Graphviz installed)
        call(['dot', '-Tpng', 'PSYtree.dot', '-o', 'PSYtree'+str(count)+'.png', '-Gdpi=600'])
        # BUG FIX: a bare Image(...) inside a function is discarded, so
        # nothing was ever shown; display() renders it explicitly.
        display(Image(filename = 'PSYtree'+str(count)+'.png'))
def getCoverage(model,verbose=True):
    '''
    return how many distinct items (questions)
    are used in the model set.
    This includes the set of questions being
    covered by all forms that may be
    generated by the model set
    '''
    FS=[]
    for m in model:
        for count in range(len(m.estimators_)):
            clf=m.estimators_[count]
            # BUG FIX: feature index 0 is a valid feature; leaf nodes are
            # marked with negative values (sklearn TREE_UNDEFINED == -2),
            # so split nodes must be selected with >= 0, not > 0.
            fs=clf.tree_.feature[clf.tree_.feature>=0]
            FS=np.array(list(set(np.append(FS,fs))))
    if verbose:
        print("Number of items used: ", FS.size)
    return FS
# Persist every classifier set whose AUC cleared 0.81 and inspect one tree.
models=pickleModel(CLF,threshold=.81,filename='PSYmodel_3_2.pkl',verbose=True)
models
drawTrees(models[3])
```
| github_jupyter |
# Direct optimal control of a pendulum
We want to control an inverted pendulum and stabilize it in the upright position. The equations in Hamiltonian form describing an inverted pendulum with a torsional spring are as following:
$$\begin{equation}
\begin{bmatrix} \dot{q}\\ \dot{p}\\ \end{bmatrix} =
\begin{bmatrix}
0& 1/m \\
-k& -\beta/m\\
\end{bmatrix}
\begin{bmatrix} q\\ p\\ \end{bmatrix} -
\begin{bmatrix}
0\\
mgl \sin{q}\\
\end{bmatrix}+
\begin{bmatrix}
0\\
1\\
\end{bmatrix} u
\end{equation}$$
```
import sys; sys.path.append(2*'../') # go n dirs back
import matplotlib.pyplot as plt
import torch
from torchdyn.numerics.odeint import odeint
from torchcontrol.systems.classic_control import Pendulum
from torchcontrol.cost import IntegralCost
from torchcontrol.controllers import *
%load_ext autoreload
%autoreload 2
# Change device according to your configuration
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') # feel free to change :)
device = torch.device('cpu') # override
```
## Optimal control problem
In order to control the pendulum, we have to define a proper _integral cost function_ which will be our loss to be minimized during training. In a general form, it can be defined as:
$$\begin{equation}
\min_{u_\theta} J = (x(t_f) - x^\star)^\top\mathbf{P} (x(t_f) - x^\star)) + \int_{t_0}^{t_f} \left[ (x(t) - x^\star)^\top \mathbf{Q} (x(t) - x^\star) + (u_\theta(t) - u^\star)^\top \mathbf{R} (u_\theta(t) - u^\star) \right] dt
\end{equation}$$
where $ x $ is the state and $u_\theta$ is the controller; $x^\star$ and $u^\star$ are the desired position and zero-cost controller; matrices $\mathbf{P},~\mathbf{Q}, ~ \mathbf{R}$ are weights for tweaking the performance.
```
# Declaring the cost function: target state x* = (0, 0) (upright, at rest)
# with zero-cost control u* = 0; only the state term is weighted (Q=1).
x_star = torch.Tensor([0, 0]).to(device)
u_star = 0.
cost = IntegralCost(x_star=x_star, u_star=u_star, P=0, Q=1, R=0)
```
## Initial conditions
Now we can see how the system behaves with no control input in time. Let's declare some initial variables:
```
from math import pi as π
# Time span: fixed-step grid from t0 to tf (inclusive).
dt = 0.05 # step size
t0, tf = 0, 3 # initial and final time
steps = int((tf - t0)/dt) + 1
t_span = torch.linspace(t0, tf, steps).to(device)
# Initial distribution: uniform over [-π, π] in both state dimensions.
x_0 = π # limit of the state distribution (in rads and rads/second)
init_dist = torch.distributions.Uniform(torch.Tensor([-x_0, -x_0]), torch.Tensor([x_0, x_0]))
```
## Box-constrained controller
We want to give a limited control input. We consider the box-constrained neural controller (parameters $\theta$ of $u_\theta$ belong to a feed-forward neural network):
```
?? BoxConstrainedController
# Controller: 2 state inputs -> 1 control output, clamped to [-5, 5].
output_scaling = torch.Tensor([-5, 5]).to(device) # controller limits
u = BoxConstrainedController(2, 1, constrained=True, output_scaling=output_scaling).to(device)
# Initialize pendulum with given controller (fixed-step Euler for training)
pendulum = Pendulum(u, solver='euler')
```
## Optimization loop
Here we run the optimization: in particular, we use stochastic gradient descent with `Adam` to optimize the parameters
```
from tqdm import trange
# Hyperparameters
lr = 1e-3
epochs = 300
bs = 1024
opt = torch.optim.Adam(u.parameters(), lr=lr)
# Training loop: each epoch samples a fresh batch of initial states,
# rolls out the controlled dynamics, and backprops the integral cost
# through the ODE solve into the controller parameters.
losses=[]
with trange(0, epochs, desc="Epochs") as eps:
    for epoch in eps:
        x0 = init_dist.sample((bs,)).to(device)
        trajectory = pendulum(x0, t_span)
        loss = cost(trajectory); losses.append(loss.detach().cpu().item())
        loss.backward(); opt.step(); opt.zero_grad()
        eps.set_postfix(loss=(loss.detach().cpu().item()))
fig, ax = plt.subplots(1, 1, figsize=(8,4))
ax.plot(losses)
ax.set_title('Losses')
ax.set_xlabel('Epochs')
ax.set_yscale('log')
```
## Plot results
```
# Change the solver to 'dopri5' (adaptive step size, more accurate than Euler)
pendulum.solver = 'dopri5'
# Forward propagate some trajectories (scaled to 80% of the training range)
x0 = init_dist.sample((100,)).to(device)*0.8
# Prolong time span beyond the training horizon (3s -> 5s)
dt = 0.05 # step size
t0, tf = 0, 5 # initial and final time
steps = int((tf - t0)/dt) + 1
t_span = torch.linspace(t0, tf, steps).to(device)
traj = pendulum(x0, t_span)
def plot_pendulum_trajs():
    """Plot both state components of every rollout against time.

    Reads the globals x0, t_span and traj defined in the cell above.
    """
    fig, axs = plt.subplots(1, 2, figsize=(12,4))
    for i in range(len(x0)):
        axs[0].plot(t_span.cpu(), traj[:,i,0].detach().cpu(), 'tab:red', alpha=.3)
        axs[1].plot(t_span.cpu(), traj[:,i,1].detach().cpu(), 'tab:blue', alpha=.3)
    axs[0].set_xlabel(r'Time [s]'); axs[1].set_xlabel(r'Time [s]')
    axs[0].set_ylabel(r'p'); axs[1].set_ylabel(r'q')
    axs[0].set_title(r'Positions'); axs[1].set_title(r'Momenta')
plot_pendulum_trajs()
# Plot learned vector field and trajectories in phase space
n_grid = 50
graph_lim = π
def plot_phase_space():
    """Stream-plot the closed-loop vector field with rollouts overlaid.

    Reads the globals pendulum, x0, t_span, n_grid and graph_lim.
    """
    fig, ax = plt.subplots(1, 1, figsize=(6,6))
    # Evaluate the controlled dynamics on an n_grid x n_grid lattice.
    x = torch.linspace(-graph_lim, graph_lim, n_grid).to(device)
    Q, P = torch.meshgrid(x, x) ; z = torch.cat([Q.reshape(-1, 1), P.reshape(-1, 1)], 1)
    f = pendulum.dynamics(0, z).detach().cpu()
    Fq, Fp = f[:,0].reshape(n_grid, n_grid), f[:,1].reshape(n_grid, n_grid)
    # Controller output on the same lattice (computed but not plotted here).
    val = pendulum.u(0, z).detach().cpu()
    U = val.reshape(n_grid, n_grid)
    ax.streamplot(Q.T.detach().cpu().numpy(), P.T.detach().cpu().numpy(),
                  Fq.T.detach().cpu().numpy(), Fp.T.detach().cpu().numpy(), color='black', density=0.6, linewidth=0.5)
    ax.set_xlim([-graph_lim, graph_lim]) ; ax.set_ylim([-graph_lim, graph_lim])
    # Overlay the simulated trajectories on top of the field.
    traj = pendulum(x0, t_span).detach().cpu()
    for j in range(traj.shape[1]):
        ax.plot(traj[:,j,0], traj[:,j,1], color='tab:purple', alpha=.4)
    ax.set_title('Phase Space')
    ax.set_xlabel(r'p')
    ax.set_ylabel(r'q')
plot_phase_space()
```
Nice! The controller manages to stabilize the pendulum in our desired $x^\star$ 🎉
| github_jupyter |
### Question1
#### Create a function that takes a list of strings and integers, and filters out the list so that it
#### returns a list of integers only.
#### Examples
#### filter_list([1, 2, 3, "a", "b", 4]) ➞ [1, 2, 3, 4]
#### filter_list(["A", 0, "Edabit", 1729, "Python", "1729"]) ➞ [0, 1729]
#### filter_list(["Nothing", "here"]) ➞ []
```
def filter_list(l):
    """Return only the integers from l, in their original order.

    The exact-type check rejects booleans as well, even though bool is
    a subclass of int.
    """
    return [item for item in l if type(item) is int]
print(filter_list([1, 2, 3, "a", "b", 4]))
print(filter_list(["A", 0, "Edabit", 1729, "Python", "1729"]))
print(filter_list(["Nothing", "here"]))
```
### Question2 Given a list of numbers, create a function which returns the list but with each element's index in the list added to itself. This means you add 0 to the number at index 0, add 1 to the number at index 1, etc...
#### Examples
#### add_indexes([0, 0, 0, 0, 0]) ➞ [0, 1, 2, 3, 4]
#### add_indexes([1, 2, 3, 4, 5]) ➞ [1, 3, 5, 7, 9]
#### add_indexes([5, 4, 3, 2, 1]) ➞ [5, 5, 5, 5, 5]
```
def add_indexes(l):
    """Return a new list where each element has its own index added to it."""
    return [idx + val for idx, val in enumerate(l)]
add_indexes([0, 0, 0, 0, 0])
add_indexes([1, 2, 3, 4, 5])
add_indexes([5, 4, 3, 2, 1])
```
### Question3 Create a function that takes the height and radius of a cone as arguments and returns the volume of the cone rounded to the nearest hundredth. See the resources tab for the formula.
#### Examples
#### cone_volume(3, 2) ➞ 12.57
#### cone_volume(15, 6) ➞ 565.49
#### cone_volume(18, 0) ➞ 0
```
import math

def cone_volume(h, r):
    """Return the volume of a cone with height h and base radius r,
    rounded to the nearest hundredth.

    Uses math.pi (the original hard-coded 3.14) and rounds the result,
    so the exercise examples match: cone_volume(3, 2) -> 12.57,
    cone_volume(15, 6) -> 565.49, cone_volume(18, 0) -> 0.
    """
    return round(math.pi * r * r * h / 3, 2)
cone_volume(15, 6)
cone_volume(3,2)
cone_volume(18,0)
```
### Question4 This Triangular Number Sequence is generated from a pattern of dots that form a triangle.
#### The first 5 numbers of the sequence, or dots, are:
#### 1, 3, 6, 10, 15
#### This means that the first triangle has just one dot, the second one has three dots, the third one
#### has 6 dots and so on.
#### Write a function that gives the number of dots with its corresponding triangle number of the
#### sequence.
#### Examples
#### triangle(1) ➞ 1
#### triangle(6) ➞ 21
#### triangle(215) ➞ 23220
```
# T(n) = n * (n + 1) / 2  (the nth triangular number)
def triangle(n):
    """Return the number of dots in the nth triangle of the sequence.

    Uses integer floor division instead of multiplying by 0.5 so the
    result is an int, as the examples expect (triangle(1) -> 1, not 1.0).
    """
    return n * (n + 1) // 2
triangle(1)
triangle(6)
triangle(215)
```
### Question5 Create a function that takes a list of numbers between 1 and 10 (excluding one number) and returns the missing number.
#### Examples
#### missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]) ➞ 5
#### missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8]) ➞ 10
#### missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9]) ➞ 7
```
def missing_num(l):
    """Return the single number between 1 and 10 missing from l.

    The original printed the answer instead of returning it, contradicting
    the exercise statement ("returns the missing number"). Since exactly
    one value is absent, it equals the difference of the full 1..10 sum
    (55) and the sum of the given list.
    """
    return sum(range(1, 11)) - sum(l)
print(missing_num([1, 2, 3, 4, 6, 7, 8, 9, 10]))
print(missing_num([7, 2, 3, 6, 5, 9, 1, 4, 8]))
print(missing_num([10, 5, 1, 2, 4, 6, 8, 3, 9]))
```
| github_jupyter |
```
# Rotational-inertia lab script. The original used Python 2 print
# statements, which are a SyntaxError under this notebook's Python 3
# kernel; they are converted to print() calls below.
import numpy as np
import matplotlib.pyplot as plt
# Stopwatch readings in seconds; each covers 10 oscillations (divided by 10 below).
ATrot = np.array([1* 60 + 17.50, 1*60 + 17.12, 60 + 16.18, 60 +16.94, 60 + 17.57, 60+ 17.59, 60 + 17.53, 60 + 18.06])
ATcyl = np.array([60 +37.60, 60 +38.07, 60 +37.13, 60 + 37.54, 60 + 37.62, 60 + 36.84, 60 +37.40, 60 + 37.38, 60 +37.52])
mcyl = 1.6189
Rcyl = 0.0781/2. # ISU
Trot = ATrot.mean() / 10.
Tcyl = ATcyl.mean() / 10.
print(Tcyl, Trot)
g = 9.814
# Moment of inertia of the reference cylinder: I = m * R^2 / 2.
Icyl = (mcyl *(Rcyl*Rcyl)/2.)
print(Icyl)
# Unknown inertia from the ratio of squared periods.
I0 = (Trot/Tcyl)**2 * Icyl
print(I0)
import math
r = 121 /1000.
k = g * r / (2. * math.pi * I0)
print("System ratio = k = ", k)
# Hanging masses in grams, converted to kilograms.
m = np.array([141., 60., 76., 92., 116., 141., 173., 215., 270., 336.])
m /= 1000.
# Per-mass timing averages; multipliers bring partial counts to a common span.
T = np.array([83.84,
              (3. * (64.84 + 65.31) + 2.*(60. + 45.62 + 60. + 45.41)) / 4.,
              3. * (53.35 + 52.65) / 2.,
              3. * (43.84 + 42.31) / 2.,
              3. * (34.72 + 34.35) / 2.,
              3. * (28.28 + 28.59) / 2.,
              3. * (22.50 + 22.65) / 2.,
              3. * (18.25 + 18.78) / 2.,
              2. * (22.41 + 22.10) / 2.,
              2. * (17.91 + 17.75) / 2.])
print("Time = ", T)
W0 = m * T* k
print("w0 = ", W0)
T0 = W0 / (2. * math.pi)
print("Frequency = ", T0)
print("mean frequency = ", T0.mean())
plt.figure(figsize=(14,5))
plt.title("T(m) diagram")
plt.scatter(m, T)
plt.show()
TR = [466]
angl = 10. # (grad)
mangl = 141. / 1000.
# Second reading was "5. * 6. + 36.68" in the original — almost certainly a
# typo for 5. * 60., cf. the identical data at tAngl later in the notebook.
tAngl = [5. * 60 + 32.18, 5. * 60. + 36.68]
```
# Imaginary part
```
import numpy as np
import matplotlib.pyplot as plt
import math
import random
def sciPrintR(val, relErr, name=None):
    """Print a value with its absolute error derived from a RELATIVE error.

    Output: "[name] val +- val*relErr ( relErr*100 %)".
    Converted from Python 2 print statements (SyntaxError on Python 3);
    also uses `is not None` rather than `!= None`.
    """
    if name is not None:
        print(name, val, "+-", val * relErr, "(", relErr * 100., "%)")
    else:
        print(val, "+-", val * relErr, "(", relErr * 100., "%)")
def sciPrintD(val, dErr, name=None):
    """Print a value with an ABSOLUTE error and the implied relative error.

    Output: "[name] val +- dErr ( dErr/val*100 %)".
    Converted from Python 2 print statements (SyntaxError on Python 3);
    also uses `is not None` rather than `!= None`.
    """
    if name is not None:
        print(name, val, "+-", dErr, "(", (dErr/val) * 100., "%)")
    else:
        print(val, "+-", dErr, "(", (dErr/val) * 100., "%)")
def prodErrorR(errors):
    """Combine relative errors in quadrature (root of the sum of squares)."""
    squared = np.asarray(errors) ** 2
    return np.sqrt(squared.sum())
print(math.sqrt(0.1*0.1 + 0.6*0.6 + 0.4*0.4))
prodErrorR([0.1,0.6,0.4])
ATrot = np.array([1* 60 + 17.50, 1*60 + 17.12, 60 + 16.18, 60 +16.94, 60 + 17.57, 60+ 17.59, 60 + 17.53, 60 + 18.06])
ATcyl = np.array([60 +37.60, 60 +38.07, 60 +37.13, 60 + 37.54, 60 + 37.62, 60 + 36.84, 60 +37.40, 60 + 37.38, 60 +37.52])
mcyl = 1.6189
dmcyl = 0.0005
Rcyl = 0.0781/2. # ISU
dRcyl = 0.0001
ATrot = ATrot / 10.
ATcyl = ATcyl / 10.
Trot = ATrot.mean()
Tcyl = ATcyl.mean()
dTrot = ATrot.std(ddof=1.) / math.sqrt(ATrot.size)
dTcyl = ATcyl.std(ddof=1.) / math.sqrt(ATcyl.size)
sciPrintD(Tcyl, dTcyl)
sciPrintR(Trot, dTrot)
g = 9.814
Icyl = (mcyl *(Rcyl*Rcyl)/2.)
EIcyl = prodErrorR([dmcyl/mcyl, dRcyl / Rcyl, dRcyl / Rcyl])
sciPrintR(Icyl*1e6, EIcyl, name="Icyl*1e6 =")
I0 = (Trot/Tcyl)**2 * Icyl
EI0 = prodErrorR([EIcyl, dTrot/Trot, dTrot/Trot, dTcyl/Tcyl, dTcyl/Tcyl])
sciPrintR(I0*1e6, EI0, name="I0*1e6 =")
r = 121 / 1000.
dr = 1 / 1000.
k = g * r / (2. * math.pi * I0)
Ek = prodErrorR([dr/r, EI0])
sciPrintR(k, Ek, name="System ratio = k = \n")
m = np.array([60., 76., 92., 116., 141., 173., 215., 270., 336.])
m /= 1000.
dm = 1. / 1000.
T_measured = [
[3. * 64.84, 3. * 65.31, 2. * (60.+ 45.62), 2. * (60. + 45.41)],
[3. * 53.35, 3. * 52.65],
[3. * 43.84, 3. * 42.31],
[3. * 34.72, 3. * 34.35],
[3. * 28.28, 3. * 28.59, 83.84],
[3. * 22.50, 3. * 22.65],
[3. * 18.25, 3. * 18.78],
[2. * 22.41, 2. * 22.10],
[2. * 17.91, 2. * 17.75]
]
T_measured_means = np.array([(np.array(A)).mean() for A in T_measured])
T = np.array([(3. * (64.84 + 65.31) + 2.*(60. + 45.62 + 60. + 45.41)) / 4.,
3. * (53.35 + 52.65) / 2.,
3. * (43.84 + 42.31) / 2.,
3. * (34.72 + 34.35) / 2.,
(3. * (28.28 + 28.59) + 83.84)/ 3.,
3. * (22.50 + 22.65) / 2.,
3. * (18.25 + 18.78) / 2.,
2. * (22.41 + 22.10) / 2.,
2. * (17.91 + 17.75) / 2.])
assert (abs((T -T_measured_means).sum()) < 1e-10)
FREQ_ABS = 466.
W0 = m * T* k
'''
FREQ = W0 / (2. * math.pi)
TESTS = 4
for i, Tm in enumerate(T_measured):
good_W0 = FREQ_ABS * (2. * math.pi)
good_T = good_W0 / (m[i] * k)
while len(T_measured[i]) < TESTS:
T_measured[i].append((2.*good_T - np.array(T_measured[i]).mean()) * random.uniform(0.96, 1.04))
'''
# Final timing data: four measurements (seconds) per hanging mass,
# rows ordered to match the mass array `m` above.
T_measured = [
    [194.52, 195.93, 211.24, 210.82],
    [160.05, 157.95, 157.08, 152.09],
    [131.52, 126.93, 133.47, 129.54],
    [104.16, 103.05, 106.92, 102.85],
    [84.84, 85.77, 83.84, 83.75],
    [67.5, 67.95, 73.16, 68.55],
    [54.75, 56.34, 55.83, 55.28],
    [44.82, 44.2, 43.01, 43.89],
    [35.82, 35.5, 35.73, 34.76]
]
print(np.array(T_measured))
print(1./m)
# Mean time and its standard error (n = 4 trials) for each mass.
T = np.array(T_measured).mean(axis=1)
dT = (np.array(T_measured).std(axis=1,ddof=1) / math.sqrt(4)) # corr. dev / sqrt(n)
print("T = ")
for i in range(T.size):
    sciPrintD(T[i], dT[i])
# Angular velocity Omega = 2*pi / T for each mass.
Omega = (2. * math.pi)/ T
print("\n \\Omega *1e3 = ")
for i in range(T.size):
    sciPrintR(Omega[i]*1e3, dT[i]/T[i])
# W0 = m * T * k (k is the system ratio computed above); EW0 is its
# relative error combined in quadrature.
W0 = m * T* k
EW0 = prodErrorR([dm/m, dT/T, Ek])
print("\n W0 = ")
for i in range(W0.size):
    sciPrintR(W0[i], EW0[i])
# Frequency = W0 / (2*pi); relative error carries over unchanged.
T0 = W0 / (2. * math.pi)
ET0 = EW0
print("\n Frequency = ")
for i in range(T0.size):
    sciPrintR(T0[i], ET0[i])
# Converted from Python 2 print statements (SyntaxError under Python 3).
print("Frequency = ", T0)
print("mean frequency = ", T0.mean())
# Omega(M) plot: torque-like quantity M = m*g*r on x, angular velocity on y.
# M must be assigned BEFORE the tick arrays that call M.max(); in the
# original it was assigned only after those lines, raising a NameError.
M = m * g * r
fig = plt.figure(figsize=(8, 16))
plt.title("$\\Omega(M)$ diagram")
ax = fig.add_subplot(111)
# Dense minor grid with a major line every 20th tick.
x_minor_ticks = np.linspace(0, M.max() * 1.05+ 0.0001, 125) # 104
x_major_ticks = np.array([x_minor_ticks[i] for i in range(0, x_minor_ticks.size, 20)])
y_minor_ticks = np.linspace(0, Omega.max()* 1.05+ 0.0001, 248) # 4822
y_major_ticks = np.array([y_minor_ticks[i] for i in range(0, y_minor_ticks.size, 20)])
ax.set_xticks(x_major_ticks)
ax.set_xticks(x_minor_ticks, minor=True)
ax.set_yticks(y_major_ticks)
ax.set_yticks(y_minor_ticks, minor=True)
ax.grid(which='minor', alpha=0.4, linestyle='-')
ax.grid(which='major', alpha=0.7, linestyle='-')
ax.set_xlabel('$M$')
ax.set_ylabel('$\\Omega$')
plt.xlim((0, M.max() * 1.05))
plt.ylim((0, Omega.max() * 1.05))
# Theoretical straight line plus the measured points.
grid = x_minor_ticks
plt.plot(grid, grid / (I0.mean() * W0.mean()))
plt.scatter(M, Omega, s=5, color="black")
plt.show()
fig = plt.figure(figsize=(14,5))
plt.title("freqs and errors")
plt.grid(which='major', axis='both', linestyle='-')
ax = fig.gca()
ax.set_yticks(np.arange(0, T0.size, 1))
ax.set_xticks(np.arange(int((T0-T0*ET0).min()), int((T0+T0*ET0).max()) + 1, 5.))
# One horizontal error bar per measured frequency.
for i, (F, EF) in enumerate(zip(T0, ET0)):
    plt.plot([F - F*EF, F + F*EF], np.ones(2) * i, color="black", linewidth=2.)
    plt.scatter(F - F*EF, [i], marker='|')
    plt.scatter(F, [i], marker='+')
    plt.scatter(F + F*EF, [i], marker='|')
plt.show()
for F, EF in zip(T0, ET0):
    sciPrintR(F, EF)
# Graph-paper cell costs used to read the slope pk off the printed plot;
# pk1/pk2 shift the read coordinates to estimate the reading error.
x_cost = 0.4055/ 120
y_cost = 0.1809 / 240
pk = (246.* y_cost) / (124. * x_cost)
pk1 = (246.* y_cost) / ((124.-6.) * x_cost)
pk2 = ((246. - 8.)* y_cost) / (124. * x_cost)
print(pk, pk1, pk2)  # was a Python 2 print statement
dpk = (pk1 - pk2) / math.sqrt(9)
sciPrintD(pk, dpk, name="pk = ")
# Relative error of the plot-derived slope pk.
Epk = dpk/pk
# Quantity implied by the plot slope: w = 1 / (pk * I0).
wplot = 1/(pk * I0)
Ewplot = prodErrorR([Epk, EI0])
sciPrintR(wplot, Ewplot, name="wplot =")
sciPrintR(wplot / (2. * math.pi), Ewplot, name="freqplot =")
# Slow-rotation measurement: time for a 10-degree turn, two trials.
angl = 10. # (grad)
mangl = 141. / 1000.
tAngl = np.array([5. * 60 + 32.18, 5. * 60. + 36.68])
# Extrapolate to the time for a full 360-degree revolution.
ta = tAngl.mean() * (360. / angl)
print(tAngl.mean(), tAngl.mean() * 360. / angl, ta)
sciPrintR((2. * math.pi) / ta * 1e9, 1/ta, name="\OmegaTr * 1e9 = ")
# NOTE(review): Mtr is presumably a friction-torque estimate from the
# full-revolution time and slope — confirm against the lab write-up.
Mtr = (2. * math.pi) / (pk*ta)
EMtr = prodErrorR([Epk, 0.2 / ta])
sciPrintR(Mtr*1e6, EMtr, name="Mtr*1e6 = ")
m*g*r
```
| github_jupyter |
```
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from IPython.display import display
```
## Exercise 1
You've just been hired at a real estate investment firm and they would like you to build a model for pricing houses. You are given a dataset that contains data for house prices and a few features like number of bedrooms, size in square feet and age of the house. Let's see if you can build a model that is able to predict the price. In this exercise we extend what we have learned about linear regression to a dataset with more than one feature. Here are the steps to complete it:
1. Load the dataset ../data/housing-data.csv
- plot the histograms for each feature
- create 2 variables called X and y: X shall be a matrix with 3 columns (sqft,bdrms,age) and y shall be a vector with 1 column (price)
- create a linear regression model in Keras with the appropriate number of inputs and output
- split the data into train and test with a 20% test size
- train the model on the training set and check its accuracy on training and test set
- how's your model doing? Is the loss growing smaller?
- try to improve your model with these experiments:
- normalize the input features with one of the rescaling techniques mentioned above
- use a different value for the learning rate of your model
- use a different optimizer
- once you're satisfied with training, check the R2score on the test set
```
# Load the housing dataset and inspect its structure, first rows and
# summary statistics.
df = pd.read_csv('housing-data.csv')
display(df.info())
display(df.head())
display(df.describe().round(2))
# plot the histograms for each feature, side by side in one figure
plt.figure(figsize=(15, 5))
for i, feature in enumerate(df.columns):
    plt.subplot(1, 4, i+1)
    df[feature].plot(kind='hist', title=feature)
    plt.xlabel(feature)
```
#### Feature Engineering
```
# Rescale features and target to comparable magnitudes so gradient descent
# behaves well: sqft in thousands, age in decades, price in $100k units.
df['sqft1000'] = df['sqft'].div(1000.0)
df['age10'] = df['age'].div(10.0)
df['price100k'] = df['price'].div(1e5)
display(df.describe().round(2))
```
#### Train/Test split
```
# Feature matrix (rescaled sqft, bedroom count, rescaled age) and the
# rescaled price as target.
X = df[['sqft1000', 'bdrms', 'age10']].values
y = df['price100k'].values
display(X.shape)
display(y.shape)
from sklearn.model_selection import train_test_split
# Hold out 20% of the rows for testing.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2)
```
#### model
```
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
# Linear regression as a network: a single Dense unit (no activation)
# over the 3 input features.
model = Sequential()
model.add(Dense(1, input_shape=(3,)))
# NOTE(review): newer Keras renames `lr` to `learning_rate` — confirm the
# installed version still accepts `lr`.
model.compile(Adam(lr=0.1), 'mean_squared_error')
model.summary()
# Train
history = model.fit(
    X_train, y_train,
    epochs=40, verbose=0)
# Plot the loss curve over the training epochs.
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot();
```
#### Evaluate
```
# Predict on both splits to compare train vs test error.
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
from sklearn.metrics import mean_squared_error as mse
print("The Mean Squared Error on the Train set is:\t{:0.5f}".format(mse(y_train, y_train_pred)))
print("The Mean Squared Error on the Test set is:\t{:0.5f}".format(mse(y_test, y_test_pred)))
from sklearn.metrics import r2_score
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
```
## Exercise 2
Your boss was extremely happy with your work on the housing price prediction model and decided to entrust you with a more challenging task. They've seen a lot of people leave the company recently and they would like to understand why that's happening. They have collected historical data on employees and they would like you to build a model that is able to predict which employee will leave next. They would like a model that is better than random guessing. They also prefer false negatives to false positives, in this first phase. Fields in the dataset include:
- Employee satisfaction level
- Last evaluation
- Number of projects
- Average monthly hours
- Time spent at the company
- Whether they have had a work accident
- Whether they have had a promotion in the last 5 years
- Department
- Salary
- Whether the employee has left
Your goal is to predict the binary outcome variable `left` using the rest of the data. Since the outcome is binary, this is a classification problem. Here are some things you may want to try out:
1. load the dataset at ../data/HR_comma_sep.csv, inspect it with `.head()`, `.info()` and `.describe()`.
- Establish a benchmark: what would be your accuracy score if you predicted everyone stay?
- Check if any feature needs rescaling. You may plot a histogram of the feature to decide which rescaling method is more appropriate.
- convert the categorical features into binary dummy columns. You will then have to combine them with the numerical features using `pd.concat`.
- do the usual train/test split with a 20% test size
- play around with learning rate and optimizer
- check the confusion matrix, precision and recall
- check if you still get the same results if you use a 5-Fold cross validation on all the data
- Is the model good enough for your boss?
As you will see in this exercise, a logistic regression model is not good enough to help your boss. In the next chapter we will learn how to go beyond linear models.
This dataset comes from https://www.kaggle.com/ludobenistant/hr-analytics/ and is released under [CC BY-SA 4.0 License](https://creativecommons.org/licenses/by-sa/4.0/).
```
# Load the HR dataset and inspect structure, head, summary statistics
# and the class balance of the outcome variable `left`.
df = pd.read_csv('HR_comma_sep.csv')
display(df.info())
display(df.head())
display(df.describe().round(2))
display(df['left'].value_counts())
```
#### Baseline model
Establish a benchmark: what would be your accuracy score if you predicted everyone stay?
```
df.left.value_counts() / len(df)
```
--> Predict all 0 accuracy = 76.19%
--> Accuracy must >> 76%
#### Feature Engineering
```
# Rescale monthly hours to O(1) magnitude; one-hot encode the two
# categorical columns (department and salary band).
df['average_montly_hours_100'] = df['average_montly_hours']/100.0
cat_features = pd.get_dummies(df[['sales', 'salary']])
```
#### Train/Test split
```
display(df.columns)
display(cat_features.columns)
# Combine the numeric columns with the dummy-encoded categoricals into
# one feature matrix.
X = pd.concat([df[['satisfaction_level', 'last_evaluation', 'number_project',
                   'time_spend_company', 'Work_accident',
                   'promotion_last_5years', 'average_montly_hours_100']],
               cat_features], axis=1).values
y = df['left'].values
display(X.shape)
display(y.shape)
# Hold out 20% of the rows for testing.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2)
```
#### Model
```
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
# Logistic regression as a network: one sigmoid unit over the 20 inputs.
model = Sequential()
model.add(Dense(1, input_shape=(20,), activation='sigmoid'))
# NOTE(review): newer Keras renames `lr` to `learning_rate` — confirm the
# installed version still accepts `lr`.
model.compile(Adam(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
model.summary()
# Train
history = model.fit(
    X_train, y_train,
    epochs=40, verbose=0)
# Plot loss and accuracy over the training epochs.
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot();
```
#### Evaluate
```
# predict_classes() was removed from Keras (TensorFlow >= 2.6); for a
# binary sigmoid model, thresholding the predicted probabilities at 0.5
# reproduces its behavior exactly.
y_test_pred = (model.predict(X_test) > 0.5).astype("int32")
# Confusion matrix
from sklearn.metrics import confusion_matrix
def pretty_confusion_matrix(y_true, y_pred, labels=["False", "True"]):
    """Return the confusion matrix as a DataFrame with readable row and
    'Predicted ...' column labels."""
    cm = confusion_matrix(y_true, y_pred)
    pred_labels = ['Predicted '+ l for l in labels]
    df = pd.DataFrame(cm, index=labels, columns=pred_labels)
    return df
pretty_confusion_matrix(y_test, y_test_pred, labels=['Stay', 'Leave'])
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("The test Accuracy score is {:0.3f}".format(accuracy_score(y_test, y_test_pred)))
print("The test Precision score is {:0.3f}".format(precision_score(y_test, y_test_pred)))
print("The test Recall score is {:0.3f}".format(recall_score(y_test, y_test_pred)))
print("The test F1 score is {:0.3f}".format(f1_score(y_test, y_test_pred)))
# Report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_pred))
```
--> the model is not good enough since it performs no better than the benchmark.
#### Cross Validation Training
```
from keras.wrappers.scikit_learn import KerasClassifier
def build_logistic_regression_model():
    """Build a fresh logistic-regression model for each CV fold."""
    model = Sequential()
    model.add(Dense(1, input_dim=20, activation='sigmoid'))
    model.compile(Adam(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
    return model
# Wrap the Keras model so scikit-learn's CV utilities can drive it.
# NOTE(review): keras.wrappers was removed in recent Keras versions
# (replaced by the scikeras package) — confirm against the installed version.
model = KerasClassifier(
    build_fn=build_logistic_regression_model,
    epochs=25, verbose=0)
from sklearn.model_selection import KFold, cross_val_score
# 5-fold shuffled cross validation over the full dataset.
scores = cross_val_score(
    model,
    X, y,
    cv=KFold(5, shuffle=True))
display(scores)
print("The cross validation accuracy is {:0.4f} ± {:0.4f}".format(scores.mean(), scores.std()))
```
--> the model is not good enough since it performs no better than the benchmark.
| github_jupyter |
## Lab 7: Babies
Please complete this lab by providing answers in cells after the question. Use **Code** cells to write and run any code you need to answer the question and **Markdown** cells to write out answers in words. After you are finished with the assignment, remember to download it as an **HTML file** and submit it in **ELMS**.
This assignment is due by **11:59pm on Tuesday, March 29**.
```
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
```
In this lab, we will look at a dataset of a sample of newborns in a large hospital system. We will treat it as if it were a simple random sample though the sampling was done in multiple stages. The table births contains the following variables for 1,174 mother-baby pairs: the baby’s birth weight in ounces, the number of gestational days, the mother’s age in completed years, the mother’s height in inches, pregnancy weight in pounds, and whether or not the mother smoked during pregnancy.
The key question we want to answer is whether maternal smoking is associated with lower birthweights of babies.
```
# Load the mother-baby sample and preview the first five rows.
births = Table.read_table('baby.csv')
births.show(5)
```
Let's first take a look at the dataset. First, we select just the variables we want to look at. Then, since `Maternal Smoker` is a categorical variable, we group by that variable and look at summaries of the `Birth Weight` variable.
```
# Keep only the two columns of interest for the hypothesis test.
smoking_and_birthweight = births.select('Maternal Smoker', 'Birth Weight')
# Row counts per smoking status.
smoking_and_birthweight.group('Maternal Smoker')
# Mean and standard deviation of birth weight within each group.
smoking_and_birthweight.group('Maternal Smoker', collect = np.mean)
smoking_and_birthweight.group('Maternal Smoker', collect = np.std)
# Overlaid histograms of birth weight, one per smoking status.
smoking_and_birthweight.hist('Birth Weight', group = 'Maternal Smoker')
```
The distribution of the weights of the babies born to mothers who smoked appears to be shifted slightly to the left of the distribution corresponding to non-smoking mothers. The weights of the babies of the mothers who smoked seem lower on average than the weights of the babies of the non-smokers.
This raises the question of whether the difference reflects just chance variation or a difference in the distributions in the larger population. Could it be that there is no difference between the two distributions in the population, but we are seeing a difference in the samples just because of the mothers who happened to be selected?
Remember, we are mainly interested in whether maternal smoking is associated with **lower** birthweights of babies.
<font color = 'red'>**Question 1: What is the null hypothesis? What is the alternative hypothesis?**</font>
*Replace this text with your answer.*
<font color = 'red'>**Question 2: What is the statistic we want to calculate to perform the hypothesis test? Calculate the observed value of this statistic for our data.**</font>
*Hint:* Remember, we want to compare the means of the two groups. Make sure the statistic you calculate is consistent with the alternative hypothesis that we are testing!
```
```
<font color = 'red'>**Question 3: Define the function `statistic` which takes in a Table as an argument and returns the value of a statistic. Check to make sure the function works by using the `smoking_and_birthweight` table and make sure it provides one value of the statistic as the output. Assign the observed value of the statistic that you just calculated using the function to `observed_statistic`.**</font>
```
def statistic(births_table):
    # TODO(student): compute the test statistic from births_table —
    # presumably a difference of group mean birth weights, oriented to
    # match your alternative hypothesis.
    ...
    return ...
observed_statistic = statistic(smoking_and_birthweight)
observed_statistic
```
If there were no difference between the two distributions in the underlying population, then whether a birth weight has the label True or False with respect to maternal smoking should make no difference to the average. The idea, then, is to shuffle all the labels randomly among the mothers. This is called random permutation.
Shuffling ensures that the count of True labels does not change, and nor does the count of False labels. This is important for the comparability of the simulated and original statistics.
<font color = 'red'>**Question 4: Shuffle the `smoking_and_birthweight` table and assign the shuffled table to `shuffled_smoker`. Take the `Maternal Smoker` column from that shuffled table. Create a new table called `simulated_smoker` that contains the original `Birth Weight` variable as well as the new shuffled `Maternal Smoker` variable.**</font>
```
# TODO(student): shuffle the rows of smoking_and_birthweight, then pair the
# original 'Birth Weight' column with the shuffled 'Maternal Smoker' labels.
shuffled_smoker = ...
simulated_smoker = Table().with_columns("Birth Weight", ...,
                                        "Maternal Smoker", ...)
```
<font color = 'red'>**Question 5: Let's now see what the distribution of statistics is actually like under the null hypothesis.**</font>
Define the function `simulation_and_statistic` that shuffles the table, calculates the statistic, and returns the statistic. Then, create an array called `simulated_statistics` and use a loop to generate 5000 simulated statistics.
```
def simulation_and_statistic():
    '''Simulates shuffling the smoking_and_birthweight table and calculating the statistics.
    Returns one statistic.'''
    # TODO(student): shuffle, recompute the statistic, and return it.
    ...
    return ...
num_repetitions = 5000
# TODO(student): collect num_repetitions simulated statistics in an array.
simulated_statistics = ...
for ... in ...:
    ...
```
We can visualize the resulting simulated statistics by putting the array into a table and using `hist`.
```
# Histogram of the simulated statistics (the null distribution).
Table().with_column('Simulated Statistic', simulated_statistics).hist()
plt.title('Prediction Under the Null Hypothesis')
# Mark the observed statistic in red for comparison against the null.
plt.scatter(observed_statistic, 0, color='red', s=30);
```
<font color = 'red'>**Question 6: Calculate the p-value.**</font>
*Hint:* Think about how you set up the alternative hypothesis and what you used for your statistic.
| github_jupyter |
```
!pip install pandas sklearn
import pandas as pd
# Full Spotify dataset with track metadata.
df = pd.read_csv('spotify_kaggle/data.csv')
df.head()
# Preprocessed numeric feature matrix used by the nearest-neighbours model.
new_df = pd.read_csv('spotify2.csv')
new_df.head()
import pickle
# Load the pre-trained nearest-neighbours model from disk.
# NOTE(review): unpickling an untrusted file can execute arbitrary code.
filename = 'neighbors'
infile = open(filename,'rb')
model = pickle.load(infile)
infile.close()
def value_monad(a):
    """Return row `a` of new_df as a plain list (the model feature vector)."""
    # iloc fetches one row directly; the original converted the ENTIRE
    # frame to nested lists on every call.
    return new_df.iloc[a].tolist()
value_monad(1)
def heigher_order_features(input_y):
    """Return row `input_y` of new_df as a list, in column order.

    Helper for compare_this(): the values feed the radar-chart axes.
    """
    # Column-by-column lookup as in the original (preserving per-column
    # dtypes), minus the unused enumerate index and redundant str()/tolist().
    return [new_df[col][input_y] for col in new_df.columns]
print(heigher_order_features(2))
import plotly.graph_objects as go
import plotly.offline as pyo
def compare_this(a,b):
    """Overlay radar charts of the feature vectors for rows a and b of new_df."""
    categories = new_df.columns.tolist()
    fig = go.Figure()
    # First song's feature polygon.
    fig.add_trace(go.Scatterpolar(
        r=heigher_order_features(a),
        theta=categories,
        fill='toself',
        name='Product A'
    ))
    # Second song's feature polygon, drawn on the same axes.
    fig.add_trace(go.Scatterpolar(
        r=heigher_order_features(b),
        theta=categories,
        fill='toself',
        name='Product B'
    ))
    # Features are assumed to lie in [0, 1] — TODO confirm the scaling.
    fig.update_layout(
        polar=dict(
            radialaxis=dict(
                visible=True,
                range=[0, 1]
            )),
        showlegend=False
    )
    pyo.iplot(fig, filename = 'basic-line')
compare_this(100,200)
print(model.kneighbors([value_monad(10000)]))
def search_id_monad(a):
    """Return the neighbour-index array for song `a` (second element of
    kneighbors(), i.e. indices rather than distances)."""
    return model.kneighbors([value_monad(a)])[1]
search_id_monad(10000)
df.values[10000]
def run_model(a):
    """Radar-compare song `a` with its nearest neighbour and return the
    chart result together with both songs' metadata rows from df."""
    monad = search_id_monad(a)
    # First two neighbour indices (index 0 is typically the query itself).
    data = [monad[0][0], monad[0][1]]
    meta_data = [
        df.values.tolist()[data[0]],
        df.values.tolist()[data[1]]]
    return [compare_this(data[0], data[1]), meta_data]
run_model(10000)
compare_this(10000, 18201)
search_id_monad(10000)[0]
```
## Quantum Phonic playlist
Postprocessing playlists based on strawberryfields, a Python library providing high-level functions for near-term photonic devices.
```
def degrees_sep(song_id_in):
    """Return (seed, neighbour) edge tuples linking a song to each of its
    nearest neighbours."""
    # Query the model once; the original re-ran the k-NN search inside the
    # loop for every appended edge.
    neighbours = search_id_monad(song_id_in)[0]
    return [(neighbours[0], x) for x in neighbours]
print(degrees_sep(10000))
def multi_degrees_sep(song_id_in, level):
    """Collect first-degree neighbour edges for a song.

    NOTE(review): `level` is currently unused and the second loop only
    copies the neighbour ids into state2 — the multi-level traversal
    looks unfinished (see the commented-out ndegree line).
    """
    state = []
    for i,x in enumerate(search_id_monad(song_id_in)[0]):
        state.append((search_id_monad(song_id_in)[0][0], x))
    state2 = []
    state3 = []
    # NOTE(review): the bare except silently swallows any error here.
    for i,y in enumerate(state):
        try:
            # takes current state
            current = state[i][1]
            # prints out state
            print(state[i][1])
            # appends state
            state2.append(state[i][1])
            # finds relationships
            #ndegree = degrees_sep(song_id_in)
        except:
            continue
    return {'First Degree': state,
            'placeholder': state2}
multi_degrees_sep(10000,1)
```
state = [('a','b'),
('c','d')]
state[0][1]
```
from strawberryfields.apps import data, plot, sample, clique
import numpy as np
import networkx as nx
import plotly
def visualize_deg_sep(song_id_in):
    """Draw the song's neighbour graph with Spotify-themed colours
    (green subgraph on a dark background)."""
    G = nx.Graph()
    G.add_nodes_from(search_id_monad(song_id_in)[0])
    G.add_edges_from(degrees_sep(song_id_in))
    # Highlight all neighbours as the emphasised subgraph.
    maximal_clique = search_id_monad(song_id_in)[0]
    return plot.graph(G,
                      maximal_clique,
                      subgraph_node_colour='#1DB954',
                      subgraph_edge_colour='#1DB954',
                      graph_node_colour='#ffffff',
                      graph_edge_colour='#ffffff',
                      background_color='#191414',
                      graph_node_size=5)
visualize_deg_sep(10000)
```
| github_jupyter |
# Efficient Grammar Fuzzing
In the [chapter on grammars](Grammars.ipynb), we have seen how to use _grammars_ for very effective and efficient testing. In this chapter, we refine the previous string-based algorithm into a tree-based algorithm, which is much faster and allows for much more control over the production of fuzz inputs.
The algorithm in this chapter serves as a foundation for several more techniques; this chapter thus is a "hub" in the book.
**Prerequisites**
* You should know how grammar-based fuzzing works, e.g. from the [chapter on grammars](Grammars.ipynb).
## Synopsis
<!-- Automatically generated. Do not edit. -->
To [use the code provided in this chapter](Importing.ipynb), write
```python
>>> from fuzzingbook.GrammarFuzzer import <identifier>
```
and then make use of the following features.
This chapter introduces `GrammarFuzzer`, an efficient grammar fuzzer that takes a grammar to produce syntactically valid input strings. Here's a typical usage:
```python
>>> from Grammars import US_PHONE_GRAMMAR
>>> phone_fuzzer = GrammarFuzzer(US_PHONE_GRAMMAR)
>>> phone_fuzzer.fuzz()
'(785)853-4702'
```
The `GrammarFuzzer` constructor takes a number of keyword arguments to control its behavior. `start_symbol`, for instance, allows to set the symbol that expansion starts with (instead of `<start>`):
```python
>>> area_fuzzer = GrammarFuzzer(US_PHONE_GRAMMAR, start_symbol='<area>')
>>> area_fuzzer.fuzz()
'269'
>>> import inspect
>>> print(inspect.getdoc(GrammarFuzzer.__init__))
Produce strings from `grammar`, starting with `start_symbol`.
If `min_nonterminals` or `max_nonterminals` is given, use them as limits
for the number of nonterminals produced.
If `disp` is set, display the intermediate derivation trees.
If `log` is set, show intermediate steps as text on standard output.
```
Internally, `GrammarFuzzer` makes use of [derivation trees](#Derivation-Trees), which it expands step by step. After producing a string, the tree produced can be accessed in the `derivation_tree` attribute.
```python
>>> display_tree(phone_fuzzer.derivation_tree)
```

In the internal representation of a derivation tree, a _node_ is a pair (`symbol`, `children`). For nonterminals, `symbol` is the symbol that is being expanded, and `children` is a list of further nodes. For terminals, `symbol` is the terminal string, and `children` is empty.
```python
>>> phone_fuzzer.derivation_tree
('<start>',
[('<phone-number>',
[('(', []),
('<area>',
[('<lead-digit>', [('7', [])]),
('<digit>', [('8', [])]),
('<digit>', [('5', [])])]),
(')', []),
('<exchange>',
[('<lead-digit>', [('8', [])]),
('<digit>', [('5', [])]),
('<digit>', [('3', [])])]),
('-', []),
('<line>',
[('<digit>', [('4', [])]),
('<digit>', [('7', [])]),
('<digit>', [('0', [])]),
('<digit>', [('2', [])])])])])
```
The chapter contains various helpers to work with derivation trees, including visualization tools.
## An Insufficient Algorithm
In the [previous chapter](Grammars.ipynb), we have introduced the `simple_grammar_fuzzer()` function which takes a grammar and automatically produces a syntactically valid string from it. However, `simple_grammar_fuzzer()` is just what its name suggests – simple. To illustrate the problem, let us get back to the `expr_grammar` we created from `EXPR_GRAMMAR_BNF` in the [chapter on grammars](Grammars.ipynb):
```
import bookutils
from bookutils import unicode_escape
from Grammars import EXPR_EBNF_GRAMMAR, convert_ebnf_grammar, simple_grammar_fuzzer, is_valid_grammar, exp_string, exp_opts
# Convert the EBNF arithmetic grammar into plain BNF form.
expr_grammar = convert_ebnf_grammar(EXPR_EBNF_GRAMMAR)
expr_grammar
```
`expr_grammar` has an interesting property. If we feed it into `simple_grammar_fuzzer()`, the function gets stuck in an infinite expansion:
```
from ExpectError import ExpectTimeout
# Demonstrate the problem: the expansion never terminates on this grammar,
# so cap the run at one second.
with ExpectTimeout(1):
    simple_grammar_fuzzer(grammar=expr_grammar, max_nonterminals=3)
```
Why is that so? The problem is in this rule:
```
expr_grammar['<factor>']
```
Here, any choice except for `(expr)` increases the number of symbols, even if only temporary. Since we place a hard limit on the number of symbols to expand, the only choice left for expanding `<factor>` is `(<expr>)`, which leads to an infinite addition of parentheses.
The problem of potentially infinite expansion is only one of the problems with `simple_grammar_fuzzer()`. More problems include:
1. *It is inefficient*. With each iteration, this fuzzer would go search the string produced so far for symbols to expand. This becomes inefficient as the production string grows.
2. *It is hard to control.* Even while limiting the number of symbols, it is still possible to obtain very long strings – and even infinitely long ones, as discussed above.
Let us illustrate both problems by plotting the time required for strings of different lengths.
```
from Grammars import simple_grammar_fuzzer
from Grammars import START_SYMBOL, EXPR_GRAMMAR, URL_GRAMMAR, CGI_GRAMMAR
from Grammars import RE_NONTERMINAL, nonterminals, is_nonterminal
from Timer import Timer
# Time 50 runs of the string-based fuzzer, recording output length and
# generation time for each run.
trials = 50
xs = []
ys = []
for i in range(trials):
    with Timer() as t:
        s = simple_grammar_fuzzer(EXPR_GRAMMAR, max_nonterminals=15)
    xs.append(len(s))
    ys.append(t.elapsed_time())
    print(i, end=" ")
print()
average_time = sum(ys) / trials
print("Average time:", average_time)
%matplotlib inline
import matplotlib.pyplot as plt
# Each point: one generated string (length vs generation time).
plt.scatter(xs, ys)
plt.title('Time required for generating an output');
```
We see that (1) the time needed to generate an output increases quadratically with the length of that output, and that (2) a large portion of the produced outputs are tens of thousands of characters long.
To address these problems, we need a _smarter algorithm_ – one that is more efficient, that gets us better control over expansions, and that is able to foresee in `expr_grammar` that the `(expr)` alternative yields a potentially infinite expansion, in contrast to the other two.
## Derivation Trees
To both obtain a more efficient algorithm _and_ exercise better control over expansions, we will use a special representation for the strings that our grammar produces. The general idea is to use a *tree* structure that will be subsequently expanded – a so-called *derivation tree*. This representation allows us to always keep track of our expansion status – answering questions such as which elements have been expanded into which others, and which symbols still need to be expanded. Furthermore, adding new elements to a tree is far more efficient than replacing strings again and again.
Like other trees used in programming, a derivation tree (also known as *parse tree* or *concrete syntax tree*) consists of *nodes* which have other nodes (called *child nodes*) as their *children*. The tree starts with one node that has no parent; this is called the *root node*; a node without children is called a *leaf*.
The grammar expansion process with derivation trees is illustrated in the following steps, using the arithmetic grammar [from
the chapter on grammars](Grammars.ipynb). We start with a single node as root of the tree, representing the *start symbol* – in our case `<start>`.
(We use `dot` as a drawing program; you don't need to look at the code, just at its results.)
```
from graphviz import Digraph
tree = Digraph("root")
tree.attr('node', shape='plain')
tree.node(r"\<start\>")
tree
```
To expand the tree, we traverse it, searching for a nonterminal symbol $S$ without children. $S$ thus is a symbol that still has to be expanded. We then chose an expansion for $S$ from the grammar. Then, we add the expansion as a new child of $S$. For our start symbol `<start>`, the only expansion is `<expr>`, so we add it as a child.
```
tree.edge(r"\<start\>", r"\<expr\>")
tree
```
To construct the produced string from a derivation tree, we traverse the tree in order and collect the symbols at the leaves of the tree. In the case above, we obtain the string `"<expr>"`.
To further expand the tree, we choose another symbol to expand, and add its expansion as new children. This would get us the `<expr>` symbol, which gets expanded into `<expr> + <term>`, adding three children.
```
tree.edge(r"\<expr\>", r"\<expr\> ")
tree.edge(r"\<expr\>", r"+")
tree.edge(r"\<expr\>", r"\<term\>")
tree
```
We repeat the expansion until there are no symbols left to expand:
```
tree.edge(r"\<expr\> ", r"\<term\> ")
tree.edge(r"\<term\> ", r"\<factor\> ")
tree.edge(r"\<factor\> ", r"\<integer\> ")
tree.edge(r"\<integer\> ", r"\<digit\> ")
tree.edge(r"\<digit\> ", r"2 ")
tree.edge(r"\<term\>", r"\<factor\>")
tree.edge(r"\<factor\>", r"\<integer\>")
tree.edge(r"\<integer\>", r"\<digit\>")
tree.edge(r"\<digit\>", r"2")
tree
```
We now have a representation for the string `2 + 2`. In contrast to the string alone, though, the derivation tree records _the entire structure_ (and production history, or _derivation_ history) of the produced string. It also allows for simple comparison and manipulation – say, replacing one subtree (substructure) against another.
## Representing Derivation Trees
To represent a derivation tree in Python, we use the following format. A node is a pair
```python
(SYMBOL_NAME, CHILDREN)
```
where `SYMBOL_NAME` is a string representing the node (i.e. `"<start>"` or `"+"`) and `CHILDREN` is a list of children nodes.
`CHILDREN` can take some special values:
1. `None` as a placeholder for future expansion. This means that the node is a *nonterminal symbol* that should be expanded further.
2. `[]` (i.e., the empty list) to indicate _no_ children. This means that the node is a *terminal symbol* that can no longer be expanded.
Let us take a very simple derivation tree, representing the intermediate step `<expr> + <term>`, above.
```
derivation_tree = ("<start>",
[("<expr>",
[("<expr>", None),
(" + ", []),
("<term>", None)]
)])
```
To better understand the structure of this tree, let us introduce a function `display_tree()` that visualizes this tree.
#### Excursion: Implementing `display_tree()`
We use the `dot` drawing program from the `graphviz` package algorithmically, traversing the above structure. (Unless you're deeply interested in tree visualization, you can directly skip to the example below.)
```
from graphviz import Digraph
from IPython.display import display
import re
def dot_escape(s):
    """Return a copy of `s` with every character that is special to dot
    backslash-escaped, so graphviz renders it literally."""
    # Everything except letters, digits, double quotes, and spaces
    # gets a backslash prefix.
    return re.sub(r'([^a-zA-Z0-9" ])', r"\\\1", s)
assert dot_escape("hello") == "hello"
assert dot_escape("<hello>, world") == "\\<hello\\>\\, world"
assert dot_escape("\\n") == "\\\\n"
```
While we are interested at present in visualizing a `derivation_tree`, it is in our interest to generalize the visualization procedure. In particular, it would be helpful if our method `display_tree()` could display *any* tree-like data structure. To enable this, we define a helper method `extract_node()` that extracts the current symbol and children from a given data structure. The default implementation simply extracts the symbol, children, and annotation from any `derivation_tree` node.
```
def extract_node(node, id):
    """Decompose a derivation-tree `node` into (symbol, children, annotation).

    Any tuple elements beyond (symbol, children) are stringified and
    joined into a single annotation string. `id` is accepted for
    interface compatibility with customized extractors and ignored here."""
    symbol, children, *rest = node
    annotation = ''.join(str(item) for item in rest)
    return symbol, children, annotation
```
While visualizing a tree, it is often useful to display certain nodes differently. For example, it is sometimes useful to distinguish between non-processed nodes and processed nodes. We define a helper procedure `default_node_attr()` that provides the basic display, which can be customized by the user.
```
def default_node_attr(dot, nid, symbol, ann):
    # Default node renderer: label the node `nid` with the escaped symbol;
    # the annotation `ann` is ignored in the default rendering.
    # NOTE(review): `unicode_escape()` is defined elsewhere in the book's
    # support code — presumably it escapes non-printable characters;
    # confirm against its definition.
    dot.node(repr(nid), dot_escape(unicode_escape(symbol)))
```
Similar to nodes, the edges may also require modifications. We define `default_edge_attr()` as a helper procedure that can be customized by the user.
```
def default_edge_attr(dot, start_node, stop_node):
    """Default edge renderer: draw a plain edge from `start_node` to `stop_node`."""
    source, target = repr(start_node), repr(stop_node)
    dot.edge(source, target)
```
While visualizing a tree, one may sometimes wish to change the appearance of the tree. For example, it is sometimes easier to view the tree if it was laid out left to right rather than top to bottom. We define another helper procedure `default_graph_attr()` for that.
```
def default_graph_attr(dot):
    # Default graph-level setup: render every node with the "plain" shape,
    # i.e. just the label, without a box around it.
    dot.attr('node', shape='plain')
```
Finally, we define a method `display_tree()` that accepts these four functions `extract_node()`, `default_edge_attr()`, `default_node_attr()` and `default_graph_attr()` and uses them to display the tree.
```
def display_tree(derivation_tree,
                 log=False,
                 extract_node=extract_node,
                 node_attr=default_node_attr,
                 edge_attr=default_edge_attr,
                 graph_attr=default_graph_attr):
    """Render `derivation_tree` as a graphviz `Digraph` and return it.

    The four function parameters let callers customize the rendering:
    `extract_node` decomposes a tree node into (symbol, children, annotation),
    `node_attr` and `edge_attr` draw individual nodes and edges, and
    `graph_attr` applies graph-wide settings. If `log` is set, the
    generated dot source is printed.
    """
    # If we import display_tree, we also have to import its functions
    from graphviz import Digraph

    # Counter handing out unique node ids: the root is 0, descendants are
    # numbered in the order they are visited (pre-order).
    counter = 0

    def traverse_tree(dot, tree, id=0):
        (symbol, children, annotation) = extract_node(tree, id)
        node_attr(dot, id, symbol, annotation)

        if children:
            for child in children:
                # The `nonlocal` declaration inside the loop is legal;
                # it applies to the whole enclosing function.
                nonlocal counter
                counter += 1
                child_id = counter
                # Draw the edge before recursing so edges appear in
                # pre-order as well.
                edge_attr(dot, id, child_id)
                traverse_tree(dot, child, child_id)

    dot = Digraph(comment="Derivation Tree")
    graph_attr(dot)
    traverse_tree(dot, derivation_tree)
    if log:
        print(dot)
    return dot
```
#### End of Excursion
This is what our tree visualizes into:
```
display_tree(derivation_tree)
```
Within this book, we also occasionally use a function `display_annotated_tree()` which allows to add annotations to individual nodes.
#### Excursion: Source code and example for `display_annotated_tree()`
`display_annotated_tree()` displays an annotated tree structure, and lays out the graph left to right.
```
def display_annotated_tree(tree, a_nodes, a_edges, log=False):
    """Like `display_tree()`, but annotate selected nodes and edges,
    and lay the graph out left to right.

    `a_nodes` maps node ids to annotation strings;
    `a_edges` maps (parent_id, child_id) pairs to edge labels.
    """
    def graph_attr(dot):
        dot.attr('node', shape='plain')
        dot.graph_attr['rankdir'] = 'LR'  # left-to-right layout

    def annotate_node(dot, nid, symbol, ann):
        # Annotated nodes get "(annotation)" appended to their label.
        if nid in a_nodes:
            dot.node(repr(nid), "%s (%s)" % (dot_escape(unicode_escape(symbol)), a_nodes[nid]))
        else:
            dot.node(repr(nid), dot_escape(unicode_escape(symbol)))

    def annotate_edge(dot, start_node, stop_node):
        # Annotated edges get their annotation as an edge label.
        if (start_node, stop_node) in a_edges:
            dot.edge(repr(start_node), repr(stop_node),
                     a_edges[(start_node, stop_node)])
        else:
            dot.edge(repr(start_node), repr(stop_node))

    return display_tree(tree, log=log,
                        node_attr=annotate_node,
                        edge_attr=annotate_edge,
                        graph_attr=graph_attr)
display_annotated_tree(derivation_tree, {3: 'plus'}, {(1, 3): 'op'}, log=False)
```
#### End of Excursion
If we want to see all the leaf nodes in a tree as a string, the following `all_terminals()` function comes in handy:
```
def all_terminals(tree):
    """Return the string spanned by the leaves of `tree`.

    Unexpanded nonterminals (children is None) and terminal leaves
    (children == []) both contribute their symbol verbatim; inner nodes
    contribute the concatenation of their children."""
    symbol, children = tree
    if not children:
        # Leaf: either a not-yet-expanded nonterminal (children is None)
        # or a terminal symbol (children == []) — return the symbol itself.
        return symbol
    # Inner node: concatenate the strings of all subtrees, left to right.
    return ''.join(all_terminals(child) for child in children)
all_terminals(derivation_tree)
```
The alternative `tree_to_string()` function also converts the tree to a string; however, it replaces nonterminal symbols by empty strings.
```
def tree_to_string(tree):
    """Convert `tree` into a string, replacing unexpanded nonterminals
    by empty strings (in contrast to `all_terminals()`, which keeps them)."""
    symbol, children, *_ = tree
    if not children:
        # Leaf: an unexpanded nonterminal contributes nothing,
        # a terminal contributes its symbol.
        return '' if is_nonterminal(symbol) else symbol
    return ''.join(map(tree_to_string, children))
tree_to_string(derivation_tree)
```
## Expanding a Node
Let us now develop an algorithm that takes a tree with unexpanded symbols (say, `derivation_tree`, above), and expands all these symbols one after the other. As with earlier fuzzers, we create a special subclass of `Fuzzer` – in this case, `GrammarFuzzer`. A `GrammarFuzzer` gets a grammar and a start symbol; the other parameters will be used later to further control creation and to support debugging.
```
from Fuzzer import Fuzzer
class GrammarFuzzer(Fuzzer):
    def __init__(self, grammar, start_symbol=START_SYMBOL,
                 min_nonterminals=0, max_nonterminals=10, disp=False, log=False):
        """Produce strings from `grammar`, starting with `start_symbol`.
        If `min_nonterminals` or `max_nonterminals` is given, use them as limits
        for the number of nonterminals produced.
        If `disp` is set, display the intermediate derivation trees.
        If `log` is set, show intermediate steps as text on standard output."""
        self.grammar = grammar                    # mapping: nonterminal -> list of expansions
        self.start_symbol = start_symbol          # symbol that expansion starts from
        self.min_nonterminals = min_nonterminals  # inflate until this many open nonterminals
        self.max_nonterminals = max_nonterminals  # then expand randomly up to this many
        self.disp = disp                          # display intermediate trees?
        self.log = log                            # log expansion steps?
        self.check_grammar()  # Invokes is_valid_grammar()
```
To add further methods to `GrammarFuzzer`, we use the hack already introduced for [the `MutationFuzzer` class](MutationFuzzer.ipynb). The construct
```python
class GrammarFuzzer(GrammarFuzzer):
def new_method(self, args):
pass
```
allows us to add a new method `new_method()` to the `GrammarFuzzer` class. (Actually, we get a new `GrammarFuzzer` class that extends the old one, but for all our purposes, this does not matter.)
#### Excursion: `check_grammar()` implementation
We can use the above hack to define the helper method `check_grammar()`, which checks the given grammar for consistency:
```
class GrammarFuzzer(GrammarFuzzer):
    def check_grammar(self):
        """Check the given grammar for consistency; raise AssertionError if invalid."""
        # The start symbol must be defined in the grammar...
        assert self.start_symbol in self.grammar
        # ...and the grammar as a whole must pass the validity checks
        # from the chapter on grammars.
        assert is_valid_grammar(
            self.grammar,
            start_symbol=self.start_symbol,
            supported_opts=self.supported_opts())

    def supported_opts(self):
        """Return the set of expansion options this fuzzer supports.
        Empty by default; subclasses supporting extra options override this."""
        return set()
```
#### End of Excursion
Let us now define a helper method `init_tree()` that constructs a tree with just the start symbol:
```
class GrammarFuzzer(GrammarFuzzer):
    def init_tree(self):
        """Return a fresh derivation tree: the start symbol, not yet expanded."""
        return (self.start_symbol, None)
f = GrammarFuzzer(EXPR_GRAMMAR)
display_tree(f.init_tree())
```
Next, we will need a helper function `expansion_to_children()` that takes an expansion string and decomposes it into a list of derivation trees – one for each symbol (terminal or nonterminal) in the string. It uses the `re.split()` method to split an expansion string into a list of children nodes:
```
def expansion_to_children(expansion):
    """Convert an expansion string into a list of child derivation trees.

    Nonterminal substrings become unexpanded nodes `(symbol, None)`;
    terminal substrings become leaves `(symbol, [])`.
    """
    # print("Converting " + repr(expansion))
    # strings contains all substrings -- both terminals and nonterminals such
    # that ''.join(strings) == expansion

    # `exp_string()` (from the chapter on grammars) strips extra data
    # if `expansion` is a (string, options) tuple.
    expansion = exp_string(expansion)
    assert isinstance(expansion, str)

    if expansion == "":  # Special case: epsilon expansion
        return [("", [])]

    # Splitting on the nonterminal regex keeps the nonterminals in the
    # result because RE_NONTERMINAL contains a capturing group.
    strings = re.split(RE_NONTERMINAL, expansion)
    return [(s, None) if is_nonterminal(s) else (s, [])
            for s in strings if len(s) > 0]
expansion_to_children("<term> + <expr>")
```
The case of an *epsilon expansion*, i.e. expanding into an empty string as in `<symbol> ::=` needs special treatment:
```
expansion_to_children("")
```
Just like `nonterminals()` in the [chapter on Grammars](Grammars.ipynb), we provide for future extensions, allowing the expansion to be a tuple with extra data (which will be ignored).
```
expansion_to_children(("+<term>", ["extra_data"]))
```
We realize this helper as a method in `GrammarFuzzer` such that it can be overloaded by subclasses:
```
class GrammarFuzzer(GrammarFuzzer):
    def expansion_to_children(self, expansion):
        """Convert an expansion into a list of child nodes.
        Thin wrapper around the module-level function, so that
        subclasses can overload it (e.g. for caching)."""
        return expansion_to_children(expansion)
```
With this, we can now take
1. some unexpanded node in the tree,
2. choose a random expansion, and
3. return the new tree.
This is what the method `expand_node_randomly()` does.
#### Excursion: `expand_node_randomly()` implementation
The function `expand_node_randomly()` uses a helper function `choose_node_expansion()` to randomly pick an index from an array of possible children. (`choose_node_expansion()` can be overloaded in subclasses.)
```
import random
class GrammarFuzzer(GrammarFuzzer):
    def choose_node_expansion(self, node, possible_children):
        """Return index of expansion in `possible_children` to be selected. Defaults to random."""
        return random.randrange(0, len(possible_children))

    def expand_node_randomly(self, node):
        """Expand the unexpanded `node` with a randomly chosen expansion.

        Returns a new (symbol, children) pair; `node` itself is not mutated.
        """
        (symbol, children) = node
        assert children is None  # node must not have been expanded yet

        if self.log:
            print("Expanding", all_terminals(node), "randomly")

        # Fetch the possible expansions from grammar...
        expansions = self.grammar[symbol]
        possible_children = [self.expansion_to_children(
            expansion) for expansion in expansions]

        # ... and select a random expansion
        index = self.choose_node_expansion(node, possible_children)
        chosen_children = possible_children[index]

        # Process children (for subclasses)
        chosen_children = self.process_chosen_children(chosen_children,
                                                       expansions[index])

        # Return with new children
        return (symbol, chosen_children)
```
The generic `expand_node()` method can later be used to select different expansion strategies; as of now, it only uses `expand_node_randomly()`.
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node(self, node):
        # Strategy hook: redefined later in the chapter to switch between
        # random, minimum-cost, and maximum-cost expansion.
        return self.expand_node_randomly(node)
```
The helper function `process_chosen_children()` does nothing; it can be overloaded by subclasses to process the children once chosen.
```
class GrammarFuzzer(GrammarFuzzer):
    def process_chosen_children(self, chosen_children, expansion):
        """Process children after selection. By default, does nothing.
        Subclasses can overload this to post-process the chosen children."""
        return chosen_children
```
#### End of Excursion
This is how `expand_node_randomly()` works:
```
f = GrammarFuzzer(EXPR_GRAMMAR, log=True)
print("Before:")
tree = ("<integer>", None)
display_tree(tree)
print("After:")
tree = f.expand_node_randomly(tree)
display_tree(tree)
```
## Expanding a Tree
Let us now apply the above node expansion to some node in the tree. To this end, we first need to search the tree for unexpanded nodes. `possible_expansions()` counts how many unexpanded symbols there are in a tree:
```
class GrammarFuzzer(GrammarFuzzer):
    def possible_expansions(self, node):
        """Return the number of unexpanded symbols in the tree rooted at `node`."""
        (symbol, children) = node
        if children is None:
            return 1  # this node itself is still unexpanded
        return sum(self.possible_expansions(c) for c in children)
f = GrammarFuzzer(EXPR_GRAMMAR)
print(f.possible_expansions(derivation_tree))
```
The method `any_possible_expansions()` returns True if the tree has any unexpanded nodes.
```
class GrammarFuzzer(GrammarFuzzer):
    def any_possible_expansions(self, node):
        """Return True if the tree rooted at `node` has any unexpanded symbol."""
        (symbol, children) = node
        if children is None:
            return True
        # `any()` short-circuits, so this is cheaper than counting
        # with `possible_expansions()`.
        return any(self.any_possible_expansions(c) for c in children)
f = GrammarFuzzer(EXPR_GRAMMAR)
f.any_possible_expansions(derivation_tree)
```
Here comes `expand_tree_once()`, the core method of our tree expansion algorithm. It first checks whether it is currently being applied on a nonterminal symbol without expansion; if so, it invokes `expand_node()` on it, as discussed above.
If the node is already expanded (i.e. has children), it checks the subset of children which still have unexpanded symbols, randomly selects one of them, and applies itself recursively on that child.
#### Excursion: `expand_tree_once()` implementation
The `expand_tree_once()` method replaces the child _in place_, meaning that it actually mutates the tree being passed as an argument rather than returning a new tree. This in-place mutation is what makes this function particularly efficient. Again, we use a helper method (`choose_tree_expansion()`) to return the chosen index from a list of children that can be expanded.
```
class GrammarFuzzer(GrammarFuzzer):
    def choose_tree_expansion(self, tree, children):
        """Return index of subtree in `children` to be selected for expansion. Defaults to random."""
        return random.randrange(0, len(children))

    def expand_tree_once(self, tree):
        """Choose an unexpanded symbol in tree; expand it. Can be overloaded in subclasses.

        Expansion happens _in place_: the `children` list of `tree` is
        mutated and `tree` itself is returned — except when `tree` itself
        is unexpanded, in which case a new node is returned.
        """
        (symbol, children) = tree
        if children is None:
            # The node itself is unexpanded: expand it directly
            return self.expand_node(tree)

        # `index_map` holds the positions (in `children`) of all children
        # that still contain unexpanded symbols; `expandable_children`
        # are those children, in the same order. Building the index list
        # directly avoids the quadratic `c in expandable_children`
        # membership scans of the naive formulation.
        index_map = [i for (i, c) in enumerate(children)
                     if self.any_possible_expansions(c)]
        expandable_children = [children[i] for i in index_map]

        # Select a random expandable child...
        child_to_be_expanded = \
            self.choose_tree_expansion(tree, expandable_children)

        # ...and expand it in place
        children[index_map[child_to_be_expanded]] = \
            self.expand_tree_once(expandable_children[child_to_be_expanded])

        return tree
```
#### End of Excursion
Let us illustrate how `expand_tree_once()` works. We start with our derivation tree from above...
```
derivation_tree = ("<start>",
[("<expr>",
[("<expr>", None),
(" + ", []),
("<term>", None)]
)])
display_tree(derivation_tree)
```
... and now expand it twice:
```
f = GrammarFuzzer(EXPR_GRAMMAR, log=True)
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
```
We see that with each step, one more symbol is expanded. Now all it takes is to apply this again and again, expanding the tree further and further.
## Closing the Expansion
With `expand_tree_once()`, we can keep on expanding the tree – but how do we actually stop? The key idea here, introduced by Luke in \cite{Luke2000}, is that after inflating the derivation tree to some maximum size, we _only want to apply expansions that increase the size of the tree by a minimum_. For `<factor>`, for instance, we would prefer an expansion into `<integer>`, as this will not introduce further recursion (and potential size inflation); for `<integer>`, likewise, an expansion into `<digit>` is preferred, as it will less increase tree size than `<digit><integer>`.
To identify the _cost_ of expanding a symbol, we introduce two functions that mutually rely on each other:
* `symbol_cost()` returns the minimum cost of all expansions of a symbol, using `expansion_cost()` to compute the cost for each expansion.
* `expansion_cost()` returns the total cost of an expansion – the sum of the symbol costs of all nonterminals it contains, plus one. If a nonterminal is encountered again during traversal, the cost of the expansion is $\infty$, indicating (potentially infinite) recursion.
```
class GrammarFuzzer(GrammarFuzzer):
    def symbol_cost(self, symbol, seen=frozenset()):
        """Return the minimum cost over all expansions of `symbol`.

        `seen` holds the symbols already on the current expansion path
        and is used to detect (potentially infinite) recursion.
        """
        # Default is an immutable `frozenset` rather than the mutable
        # `set()`: a mutable default argument is shared across calls and
        # a latent source of bugs.
        expansions = self.grammar[symbol]
        return min(self.expansion_cost(e, seen | {symbol}) for e in expansions)

    def expansion_cost(self, expansion, seen=frozenset()):
        """Return the cost of `expansion`: the summed symbol costs of its
        nonterminals plus one, or infinity if expanding it recurses."""
        symbols = nonterminals(expansion)
        if len(symbols) == 0:
            return 1  # no symbol

        if any(s in seen for s in symbols):
            return float('inf')  # recursion: expanding this may never terminate

        # the value of an expansion is the sum of all expandable variables
        # inside + 1
        return sum(self.symbol_cost(s, seen) for s in symbols) + 1
```
Here are two examples: The minimum cost of expanding a digit is 1, since we have to choose from one of its expansions.
```
f = GrammarFuzzer(EXPR_GRAMMAR)
assert f.symbol_cost("<digit>") == 1
```
The minimum cost of expanding `<expr>`, though, is five, as this is the minimum number of expansions required. (`<expr>` $\rightarrow$ `<term>` $\rightarrow$ `<factor>` $\rightarrow$ `<integer>` $\rightarrow$ `<digit>` $\rightarrow$ 1)
```
assert f.symbol_cost("<expr>") == 5
```
We define `expand_node_by_cost(self, node, choose)`, a variant of `expand_node()` that takes the above cost into account. It determines the minimum cost `cost` across all children and then chooses a child from the list using the `choose` function, which by default is the minimum cost. If multiple children all have the same minimum cost, it chooses randomly between these.
#### Excursion: `expand_node_by_cost()` implementation
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node_by_cost(self, node, choose=min):
        """Expand `node`, choosing among its possible expansions by cost.

        `choose` picks the target cost from the list of expansion costs
        (`min` by default); ties between expansions of the chosen cost
        are broken by `choose_node_expansion()`, i.e. randomly.
        """
        (symbol, children) = node
        assert children is None  # node must not have been expanded yet

        # Fetch the possible expansions from grammar...
        expansions = self.grammar[symbol]

        # Pair every expansion with its children and its cost;
        # `{symbol}` seeds the recursion check in `expansion_cost()`.
        possible_children_with_cost = [(self.expansion_to_children(expansion),
                                        self.expansion_cost(
                                            expansion, {symbol}),
                                        expansion)
                                       for expansion in expansions]

        costs = [cost for (child, cost, expansion)
                 in possible_children_with_cost]
        chosen_cost = choose(costs)

        # Keep only the candidates whose cost equals the chosen one.
        children_with_chosen_cost = [child for (child, child_cost, _) in possible_children_with_cost
                                     if child_cost == chosen_cost]
        expansion_with_chosen_cost = [expansion for (_, child_cost, expansion) in possible_children_with_cost
                                      if child_cost == chosen_cost]

        index = self.choose_node_expansion(node, children_with_chosen_cost)

        chosen_children = children_with_chosen_cost[index]
        chosen_expansion = expansion_with_chosen_cost[index]
        chosen_children = self.process_chosen_children(
            chosen_children, chosen_expansion)

        # Return with a new list
        return (symbol, chosen_children)
```
#### End of Excursion
The shortcut `expand_node_min_cost()` passes `min()` as the `choose` function, which makes it expand nodes at minimum cost.
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node_min_cost(self, node):
        """Expand `node` with one of its cheapest expansions,
        closing the tree as quickly as possible."""
        if self.log:
            print("Expanding", all_terminals(node), "at minimum cost")

        return self.expand_node_by_cost(node, min)
```
We can now apply this function to close the expansion of our derivation tree, using `expand_tree_once()` with the above `expand_node_min_cost()` as expansion function.
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node(self, node):
        # Redefine the strategy hook: close expansions at minimum cost.
        return self.expand_node_min_cost(node)
f = GrammarFuzzer(EXPR_GRAMMAR, log=True)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
```
We keep on expanding until all nonterminals are expanded.
```
while f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
```
Here is the final tree:
```
display_tree(derivation_tree)
```
We see that in each step, `expand_node_min_cost()` chooses an expansion that does not increase the number of symbols, eventually closing all open expansions.
## Node Inflation
Especially at the beginning of an expansion, we may be interested in getting _as many nodes as possible_ – that is, we'd like to prefer expansions that give us _more_ nonterminals to expand. This is actually the exact opposite of what `expand_node_min_cost()` gives us, and we can implement a method `expand_node_max_cost()` that will always choose among the nodes with the _highest_ cost:
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node_max_cost(self, node):
        """Expand `node` with one of its most expensive expansions,
        inflating the tree with as many new nonterminals as possible."""
        if self.log:
            print("Expanding", all_terminals(node), "at maximum cost")

        return self.expand_node_by_cost(node, max)
```
To illustrate `expand_node_max_cost()`, we can again redefine `expand_node()` to use it, and then use `expand_tree_once()` to show a few expansion steps:
```
class GrammarFuzzer(GrammarFuzzer):
    def expand_node(self, node):
        # Redefine the strategy hook: inflate the tree at maximum cost.
        return self.expand_node_max_cost(node)
derivation_tree = ("<start>",
[("<expr>",
[("<expr>", None),
(" + ", []),
("<term>", None)]
)])
f = GrammarFuzzer(EXPR_GRAMMAR, log=True)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
if f.any_possible_expansions(derivation_tree):
derivation_tree = f.expand_tree_once(derivation_tree)
display_tree(derivation_tree)
```
We see that with each step, the number of nonterminals increases. Obviously, we have to put a limit on this number.
## Three Expansion Phases
We can now put all three phases together in a single function `expand_tree()` which will work as follows:
1. **Max cost expansion.** Expand the tree using expansions with maximum cost until we have at least `min_nonterminals` nonterminals. This phase can be easily skipped by setting `min_nonterminals` to zero.
2. **Random expansion.** Keep on expanding the tree randomly until we reach `max_nonterminals` nonterminals.
3. **Min cost expansion.** Close the expansion with minimum cost.
We implement these three phases by having `expand_node` reference the expansion method to apply. This is controlled by setting `expand_node` (the method reference) to first `expand_node_max_cost` (i.e., calling `expand_node()` invokes `expand_node_max_cost()`), then `expand_node_randomly`, and finally `expand_node_min_cost`. In the first two phases, we also set a maximum limit of `min_nonterminals` and `max_nonterminals`, respectively.
#### Excursion: Implementation of three-phase `expand_tree()`
```
class GrammarFuzzer(GrammarFuzzer):
    def log_tree(self, tree):
        """Output a tree if self.log is set; if self.display is also set, show the tree structure"""
        if self.log:
            print("Tree:", all_terminals(tree))
            if self.disp:
                display(display_tree(tree))
            # print(self.possible_expansions(tree), "possible expansion(s) left")

    def expand_tree_with_strategy(self, tree, expand_node_method, limit=None):
        """Expand tree using `expand_node_method` as node expansion function
        until the number of possible expansions reaches `limit`."""
        # Install the strategy as the `expand_node` hook that
        # `expand_tree_once()` invokes.
        self.expand_node = expand_node_method
        while ((limit is None
                or self.possible_expansions(tree) < limit)
               and self.any_possible_expansions(tree)):
            tree = self.expand_tree_once(tree)
            self.log_tree(tree)
        return tree

    def expand_tree(self, tree):
        """Expand `tree` in a three-phase strategy until all expansions are complete."""
        self.log_tree(tree)
        # Phase 1: inflate at maximum cost until `min_nonterminals` open symbols
        tree = self.expand_tree_with_strategy(
            tree, self.expand_node_max_cost, self.min_nonterminals)
        # Phase 2: expand randomly until `max_nonterminals` open symbols
        tree = self.expand_tree_with_strategy(
            tree, self.expand_node_randomly, self.max_nonterminals)
        # Phase 3: close all remaining expansions at minimum cost (no limit)
        tree = self.expand_tree_with_strategy(
            tree, self.expand_node_min_cost)

        assert self.possible_expansions(tree) == 0  # tree is fully expanded
        return tree
```
#### End of Excursion
Let us try this out on our example. We start with a half-expanded derivation tree:
```
initial_derivation_tree = ("<start>",
[("<expr>",
[("<expr>", None),
(" + ", []),
("<term>", None)]
)])
display_tree(initial_derivation_tree)
```
We now apply our expansion strategy on this tree. We see that initially, nodes are expanded at maximum cost, then randomly, and then closing the expansion at minimum cost.
```
f = GrammarFuzzer(
EXPR_GRAMMAR,
min_nonterminals=3,
max_nonterminals=5,
log=True)
derivation_tree = f.expand_tree(initial_derivation_tree)
```
This is the final derivation tree:
```
display_tree(derivation_tree)
```
And this is the resulting string:
```
all_terminals(derivation_tree)
```
## Putting it all Together
Based on this, we can now define a function `fuzz()` that – like `simple_grammar_fuzzer()` – simply takes a grammar and produces a string from it. It thus no longer exposes the complexity of derivation trees.
```
class GrammarFuzzer(GrammarFuzzer):
    def fuzz_tree(self):
        """Produce one complete derivation tree from the grammar."""
        # Create an initial derivation tree
        tree = self.init_tree()
        # print(tree)

        # Expand all nonterminals
        tree = self.expand_tree(tree)
        if self.log:
            print(repr(all_terminals(tree)))
        if self.disp:
            display(display_tree(tree))
        return tree

    def fuzz(self):
        """Produce one string from the grammar. The derivation tree is
        kept in `self.derivation_tree` for later inspection."""
        self.derivation_tree = self.fuzz_tree()
        return all_terminals(self.derivation_tree)
```
We can now apply this on all our defined grammars (and visualize the derivation tree along)
```
f = GrammarFuzzer(EXPR_GRAMMAR)
f.fuzz()
```
After calling `fuzz()`, the produced derivation tree is accessible in the `derivation_tree` attribute:
```
display_tree(f.derivation_tree)
```
Let us try out the grammar fuzzer (and its trees) on other grammar formats.
```
f = GrammarFuzzer(URL_GRAMMAR)
f.fuzz()
display_tree(f.derivation_tree)
f = GrammarFuzzer(CGI_GRAMMAR, min_nonterminals=3, max_nonterminals=5)
f.fuzz()
display_tree(f.derivation_tree)
```
How do we stack up against `simple_grammar_fuzzer()`?
```
trials = 50
xs = []
ys = []
f = GrammarFuzzer(EXPR_GRAMMAR, max_nonterminals=20)
for i in range(trials):
with Timer() as t:
s = f.fuzz()
xs.append(len(s))
ys.append(t.elapsed_time())
print(i, end=" ")
print()
average_time = sum(ys) / trials
print("Average time:", average_time)
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(xs, ys)
plt.title('Time required for generating an output');
```
Our test generation is much faster, but also our inputs are much smaller. We see that with derivation trees, we can get much better control over grammar production.
Finally, how does `GrammarFuzzer` work with `expr_grammar`, where `simple_grammar_fuzzer()` failed? It works without any issue:
```
f = GrammarFuzzer(expr_grammar, max_nonterminals=10)
f.fuzz()
```
With `GrammarFuzzer`, we now have a solid foundation on which to build further fuzzers and illustrate more exciting concepts from the world of generating software tests. Many of these do not even require writing a grammar – instead, they _infer_ a grammar from the domain at hand, and thus allow to use grammar-based fuzzing even without writing a grammar. Stay tuned!
## Synopsis
This chapter introduces `GrammarFuzzer`, an efficient grammar fuzzer that takes a grammar to produce syntactically valid input strings. Here's a typical usage:
```
from Grammars import US_PHONE_GRAMMAR
phone_fuzzer = GrammarFuzzer(US_PHONE_GRAMMAR)
phone_fuzzer.fuzz()
```
The `GrammarFuzzer` constructor takes a number of keyword arguments to control its behavior. `start_symbol`, for instance, allows to set the symbol that expansion starts with (instead of `<start>`):
```
area_fuzzer = GrammarFuzzer(US_PHONE_GRAMMAR, start_symbol='<area>')
area_fuzzer.fuzz()
import inspect
print(inspect.getdoc(GrammarFuzzer.__init__))
```
Internally, `GrammarFuzzer` makes use of [derivation trees](#Derivation-Trees), which it expands step by step. After producing a string, the tree produced can be accessed in the `derivation_tree` attribute.
```
display_tree(phone_fuzzer.derivation_tree)
```
In the internal representation of a derivation tree, a _node_ is a pair (`symbol`, `children`). For nonterminals, `symbol` is the symbol that is being expanded, and `children` is a list of further nodes. For terminals, `symbol` is the terminal string, and `children` is empty.
```
phone_fuzzer.derivation_tree
```
The chapter contains various helpers to work with derivation trees, including visualization tools.
## Lessons Learned
* _Derivation trees_ are important for expressing input structure
* _Grammar fuzzing based on derivation trees_
1. is much more efficient than string-based grammar fuzzing,
2. gives much better control over input generation, and
3. effectively avoids running into infinite expansions.
## Next Steps
Congratulations! You have reached one of the central "hubs" of the book. From here, there is a wide range of techniques that build on grammar fuzzing.
### Extending Grammars
First, we have a number of techniques that all _extend_ grammars in some form:
* [Parsing and recombining inputs](Parser.ipynb) allows to make use of existing inputs, again using derivation trees
* [Covering grammar expansions](GrammarCoverageFuzzer.ipynb) allows for _combinatorial_ coverage
* [Assigning _probabilities_ to individual expansions](ProbabilisticGrammarFuzzer.ipynb) gives additional control over expansions
* [Assigning _constraints_ to individual expansions](GeneratorGrammarFuzzer.ipynb) allows to express _semantic constraints_ on individual rules.
### Applying Grammars
Second, we can _apply_ grammars in a variety of contexts that all involve some form of learning it automatically:
* [Fuzzing APIs](APIFuzzer.ipynb), learning a grammar from APIs
* [Fuzzing graphical user interfaces](WebFuzzer.ipynb), learning a grammar from user interfaces for subsequent fuzzing
* [Mining grammars](GrammarMiner.ipynb), learning a grammar for arbitrary input formats
Keep on expanding!
## Background
Derivation trees (then frequently called _parse trees_) are a standard data structure into which *parsers* decompose inputs. The *Dragon Book* (also known as *Compilers: Principles, Techniques, and Tools*) \cite{Aho2006} discusses parsing into derivation trees as part of compiling programs. We also use derivation trees [when parsing and recombining inputs](Parser.ipynb).
The key idea in this chapter, namely expanding until a limit of symbols is reached, and then always choosing the shortest path, stems from Luke \cite{Luke2000}.
## Exercises
### Exercise 1: Caching Method Results
Tracking `GrammarFuzzer` reveals that some methods are called again and again, always with the same values.
Set up a class `FasterGrammarFuzzer` with a _cache_ that checks whether the method has been called before, and if so, return the previously computed "memoized" value. Do this for `expansion_to_children()`. Compare the number of invocations before and after the optimization.
**Important**: For `expansion_to_children()`, make sure that each list returned is an individual copy. If you return the same (cached) list, this will interfere with the in-place modification of `GrammarFuzzer`. Use the Python `copy.deepcopy()` function for this purpose.
**Solution.** Let us demonstrate this for `expansion_to_children()`:
```
import copy

class FasterGrammarFuzzer(GrammarFuzzer):
    """`GrammarFuzzer` variant that memoizes `expansion_to_children()`.

    Tracks the total number of invocations and the number answered from the
    cache, so cache effectiveness can be reported afterwards.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._expansion_cache = {}               # expansion -> children list
        self._expansion_invocations = 0          # total calls
        self._expansion_invocations_cached = 0   # calls served from cache

    def expansion_to_children(self, expansion):
        """Return the children of `expansion`, memoizing results.

        Every list handed out is an individual deep copy: `GrammarFuzzer`
        expands children in place, so sharing one list between calls would
        interfere with later expansions.
        """
        self._expansion_invocations += 1
        if expansion in self._expansion_cache:
            self._expansion_invocations_cached += 1
            return copy.deepcopy(self._expansion_cache[expansion])

        result = super().expansion_to_children(expansion)
        # Bug fix: cache a *private* deep copy. The original cached the very
        # list it returned, so in-place modification by the caller on the
        # first (uncached) invocation would poison the cache.
        self._expansion_cache[expansion] = copy.deepcopy(result)
        return result
f = FasterGrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=3, max_nonterminals=5)
f.fuzz()
f._expansion_invocations
f._expansion_invocations_cached
print("%.2f%% of invocations can be cached" %
(f._expansion_invocations_cached * 100 / f._expansion_invocations))
```
### Exercise 2: Grammar Pre-Compilation
Some methods such as `symbol_cost()` or `expansion_cost()` return a value that is dependent on the grammar only. Set up a class `EvenFasterGrammarFuzzer()` that pre-computes these values once upon initialization, such that later invocations of `symbol_cost()` or `expansion_cost()` need only look up these values.
**Solution.** Here's a possible solution, using a hack to substitute the `symbol_cost()` and `expansion_cost()` functions once the pre-computed values are set up.
```
class EvenFasterGrammarFuzzer(GrammarFuzzer):
    """`GrammarFuzzer` that pre-computes every symbol and expansion cost
    once at construction time, then answers later cost queries from plain
    dictionary lookups instead of recomputing them."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._symbol_costs = {}
        self._expansion_costs = {}
        self.precompute_costs()

    def new_symbol_cost(self, symbol, seen=set()):
        """Table-lookup replacement for `symbol_cost()`."""
        return self._symbol_costs[symbol]

    def new_expansion_cost(self, expansion, seen=set()):
        """Table-lookup replacement for `expansion_cost()`."""
        return self._expansion_costs[expansion]

    def precompute_costs(self):
        """Fill both cost tables, then shadow the cost methods.

        Instance attributes take precedence over class methods, so the two
        assignments at the end redirect every subsequent `symbol_cost()` /
        `expansion_cost()` call on this instance to the cached versions.
        """
        for symbol, expansions in self.grammar.items():
            self._symbol_costs[symbol] = super().symbol_cost(symbol)
            for expansion in expansions:
                self._expansion_costs[expansion] = super().expansion_cost(expansion)

        # Make sure we now call the caching methods
        self.symbol_cost = self.new_symbol_cost
        self.expansion_cost = self.new_expansion_cost
f = EvenFasterGrammarFuzzer(EXPR_GRAMMAR)
```
Here are the individual costs:
```
f._symbol_costs
f._expansion_costs
f = EvenFasterGrammarFuzzer(EXPR_GRAMMAR)
f.fuzz()
```
### Exercise 3: Maintaining Trees to be Expanded
In `expand_tree_once()`, the algorithm traverses the tree again and again to find nonterminals that still can be extended. Speed up the process by keeping a list of nonterminal symbols in the tree that still can be expanded.
**Solution.** Left as exercise for the reader.
### Exercise 4: Alternate Random Expansions
We could define `expand_node_randomly()` such that it simply invokes `expand_node_by_cost(node, random.choice)`:
```
class ExerciseGrammarFuzzer(GrammarFuzzer):
    def expand_node_randomly(self, node):
        """Expand `node` by delegating to cost-based expansion with a
        random cost chooser.

        NOTE(review): unlike the base implementation (uniform over all
        expansions), this first picks a cost at random and then an
        expansion with that cost — see the discussion following this
        exercise for why the distributions differ.
        """
        if self.log:
            print("Expanding", all_terminals(node), "randomly by cost")
        return self.expand_node_by_cost(node, random.choice)
```
What is the difference between the original implementation and this alternative?
**Solution.** The alternative in `ExerciseGrammarFuzzer` has another probability distribution. In the original `GrammarFuzzer`, all expansions have the same likelihood of being expanded. In `ExerciseGrammarFuzzer`, first, a cost is chosen (randomly); then, one of the expansions with this cost is chosen (again randomly). This means that expansions whose cost is unique have a higher chance of being selected.
| github_jupyter |
# Knowledge Graph Triplet
Generate MS text -> EN Knowledge Graph Triplet.
<div class="alert alert-info">
This tutorial is available as an IPython notebook at [Malaya/example/knowledge-graph-triplet](https://github.com/huseinzol05/Malaya/tree/master/example/knowledge-graph-triplet).
</div>
<div class="alert alert-warning">
This module is only trained on standard language structure, so it is not safe to use on local (colloquial) language structure.
</div>
```
%%time
import malaya
```
### List available Transformer model
```
malaya.knowledge_graph.available_transformer()
```
### Load Transformer model
```python
def transformer(model: str = 'base', quantized: bool = False, **kwargs):
"""
Load transformer to generate knowledge graphs in triplet format from texts,
MS text -> EN triplet format.
Parameters
----------
model : str, optional (default='base')
Model architecture supported. Allowed values:
* ``'base'`` - Transformer BASE parameters.
* ``'large'`` - Transformer LARGE parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: malaya.model.tf.KnowledgeGraph class
"""
```
```
model = malaya.knowledge_graph.transformer()
```
### Load Quantized model
To load 8-bit quantized model, simply pass `quantized = True`, default is `False`.
We can expect slightly accuracy drop from quantized model, and not necessary faster than normal 32-bit float model, totally depends on machine.
```
quantized_model = malaya.knowledge_graph.transformer(quantized = True)
string1 = "Yang Berhormat Dato Sri Haji Mohammad Najib bin Tun Haji Abdul Razak ialah ahli politik Malaysia dan merupakan bekas Perdana Menteri Malaysia ke-6 yang mana beliau menjawat jawatan dari 3 April 2009 hingga 9 Mei 2018. Beliau juga pernah berkhidmat sebagai bekas Menteri Kewangan dan merupakan Ahli Parlimen Pekan Pahang"
string2 = "Pahang ialah negeri yang ketiga terbesar di Malaysia Terletak di lembangan Sungai Pahang yang amat luas negeri Pahang bersempadan dengan Kelantan di utara Perak Selangor serta Negeri Sembilan di barat Johor di selatan dan Terengganu dan Laut China Selatan di timur."
```
These models heavily trained on neutral texts, if you give political or news texts, the results returned not really good.
#### Predict using greedy decoder
```python
def greedy_decoder(self, strings: List[str], get_networkx: bool = True):
"""
Generate triples knowledge graph using greedy decoder.
Example, "Joseph Enanga juga bermain untuk Union Douala." -> "Joseph Enanga member of sports team Union Douala"
Parameters
----------
strings : List[str]
get_networkx: bool, optional (default=True)
If True, will generate networkx.MultiDiGraph.
Returns
-------
result: List[Dict]
"""
```
```
r = model.greedy_decoder([string1, string2])
r[0]
import matplotlib.pyplot as plt
import networkx as nx
g = r[0]['G']
plt.figure(figsize=(6, 6))
pos = nx.spring_layout(g)
nx.draw(g, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos = pos)
nx.draw_networkx_edge_labels(g, pos=pos)
plt.show()
g = r[1]['G']
plt.figure(figsize=(6, 6))
pos = nx.spring_layout(g)
nx.draw(g, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos = pos)
nx.draw_networkx_edge_labels(g, pos=pos)
plt.show()
```
#### Predict using beam decoder
```python
def beam_decoder(self, strings: List[str], get_networkx: bool = True):
"""
Generate triples knowledge graph using beam decoder.
Example, "Joseph Enanga juga bermain untuk Union Douala." -> "Joseph Enanga member of sports team Union Douala"
Parameters
----------
strings : List[str]
get_networkx: bool, optional (default=True)
If True, will generate networkx.MultiDiGraph.
Returns
-------
result: List[Dict]
"""
```
```
r = model.beam_decoder([string1, string2])
g = r[0]['G']
plt.figure(figsize=(6, 6))
pos = nx.spring_layout(g)
nx.draw(g, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos = pos)
nx.draw_networkx_edge_labels(g, pos=pos)
plt.show()
# https://ms.wikipedia.org/wiki/Malaysia
string = """
Malaysia secara rasminya Persekutuan Malaysia ialah sebuah negara raja berperlembagaan persekutuan di Asia Tenggara yang terdiri daripada tiga belas negeri dan tiga wilayah persekutuan, yang menduduki bumi berkeluasan 330,803 kilometer persegi (127,720 bt2). Malaysia terbahagi kepada dua kawasan yang mengapit Laut China Selatan, iaitu Semenanjung Malaysia dan Borneo Malaysia (juga Malaysia Barat dan Timur). Malaysia berkongsi sempadan darat dengan Thailand, Indonesia, dan Brunei dan juga sempadan laut dengan Singapura dan Filipina. Ibu negara Malaysia ialah Kuala Lumpur, manakala Putrajaya merupakan pusat kerajaan persekutuan. Pada tahun 2009, Malaysia diduduki oleh 28 juta penduduk dan pada tahun 2017 dianggarkan telah mencecah lebih 30 juta orang yang menduduki di Malaysia.
Malaysia berakar-umbikan Kerajaan-kerajaan Melayu yang wujud di wilayahnya dan menjadi taklukan Empayar British sejak abad ke-18. Wilayah British pertama di sini dikenali sebagai Negeri-Negeri Selat. Semenanjung Malaysia yang ketika itu dikenali sebagai Tanah Melayu atau Malaya, mula-mula disatukan di bawah komanwel pada tahun 1946, sebelum menjadi Persekutuan Tanah Melayu pada tahun 1948. Pada tahun 1957 Semenanjung Malaysia mencapai Kemerdekaan dan bebas daripada penjajah dan sekali gus menjadi catatan sejarah terpenting bagi Malaysia. Pada tahun 1963, Tanah Melayu bersatu bersama dengan negara Sabah, Sarawak, dan Singapura bagi membentuk Malaysia. Pada tahun 1965, Singapura keluar dari persekutuan untuk menjadi negara kota yang bebas. Semenjak itu, Malaysia menikmati antara ekonomi yang terbaik di Asia, dengan purata pertumbuhan keluaran dalam negara kasarnya (KDNK) kira-kira 6.5% selama 50 tahun pertama kemerdekaannya.
Ekonomi negara yang selama ini dijana oleh sumber alamnya kini juga berkembang dalam sektor-sektor ukur tanah, sains, kejuruteraan, pendidikan, pelancongan, perkapalan, perdagangan dan perubatan.
Ketua negara Malaysia ialah Yang di-Pertuan Agong, iaitu raja elektif yang terpilih dan diundi dari kalangan sembilan raja negeri Melayu. Ketua kerajaannya pula ialah Perdana Menteri. Sistem kerajaan Malaysia banyak berdasarkan sistem parlimen Westminster, dan sistem perundangannya juga berasaskan undang-undang am Inggeris.
Malaysia terletak berdekatan dengan khatulistiwa dan beriklim tropika, serta mempunyai kepelbagaian flora dan fauna, sehingga diiktiraf menjadi salah satu daripada 17 negara megadiversiti. Di Malaysia terletaknya Tanjung Piai, titik paling selatan di seluruh tanah besar Eurasia. Malaysia ialah sebuah negara perintis Persatuan Negara-Negara Asia Tenggara dan Pertubuhan Persidangan Islam, dan juga anggota Kerjasama Ekonomi Asia-Pasifik, Negara-Negara Komanwel, dan Pergerakan Negara-Negara Berkecuali.
"""
def simple_cleaning(string, chars=',.\'";'):
    """Remove unwanted punctuation characters from a string.

    Parameters
    ----------
    string : str
        Text to clean.
    chars : str, optional
        Characters to delete. Defaults to comma, period, single quote,
        double quote and semicolon (the original hard-coded set).

    Returns
    -------
    str
        `string` with every character in `chars` removed.
    """
    # str.translate does the removal in a single C-level pass instead of
    # an O(len(chars)) membership test per character.
    return string.translate(str.maketrans('', '', chars))
string = malaya.text.function.split_into_sentences(string)
string = [simple_cleaning(s) for s in string if len(s) > 50]
string
r = model.greedy_decoder(string)
g = r[0]['G']
for i in range(1, len(r), 1):
g.update(r[i]['G'])
plt.figure(figsize=(17, 17))
pos = nx.spring_layout(g)
nx.draw(g, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos = pos)
nx.draw_networkx_edge_labels(g, pos=pos)
plt.show()
```
| github_jupyter |
```
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xarray as xr
from neurolib.models.multimodel import MultiModel
from yasa import get_centered_indices
from aln_thalamus import ALNThalamusMiniNetwork
from plotting import plot_spectrum
DPI = 75
CMAP = "plasma"
plt.rcParams["figure.figsize"] = (20, 9)
plt.style.use("default_light.mplstyle")
SW = {"low_freq": 0.1, "high_freq": 2.5}
SP = {"low_freq": 11.0, "high_freq": 16.0}
SAVE_FIG = False
def simulate(
    conn=2.0,
    ou_exc=4.1,
    ou_inh=1.8,
    ou_sigma=0.0,
    tauA=1000.0,
    glk=0.018,
    a=0.0,
    b=15.0,
):
    """Run the two-node ALN-thalamus mini network and return rate traces.

    Parameters
    ----------
    conn : float
        Thalamus -> cortex coupling (only the [0, 1] entry of the first
        matrix is non-zero).
    ou_exc, ou_inh : float
        Mean (`mu`) of the background input to the cortical excitatory /
        inhibitory masses.
    ou_sigma : float
        Noise amplitude (`sigma`) of the ALN-mass inputs; 0.0 = noise-free.
    tauA : float
        Adaptation time constant `tauA` (ms).
    glk : float
        Potassium leak conductance, set on all masses via the `*g_LK` glob.
    a, b : float
        Adaptation parameters: `a` only on the cortical excitatory mass,
        `b` on every mass matching `*b`.

    Returns
    -------
    pandas.DataFrame
        Columns "ALN-EXC", "ALN-INH", "I_adapt", "TCR" indexed by `model.t`
        (index named "time"). NOTE(review): the spin-up period is included
        in the output — callers slice it off themselves (e.g. `df.loc[5:20]`).
    """
    # all times in ms
    duration = 30000.0
    t_spin_up = 5000.0
    sampling_dt = 1.0
    dt = 0.01
    # Node 0 = ALN cortical node, node 1 = thalamic node. First array is the
    # coupling matrix; the second is presumably delays (13 both ways) — TODO
    # confirm against ALNThalamusMiniNetwork's signature.
    model = MultiModel(
        ALNThalamusMiniNetwork(
            np.array([[0.0, conn], [0.0, 0.0]]), np.array([[0.0, 13.0], [13.0, 0.0]])
        )
    )
    # Glob-style keys ("*name") set the parameter on every matching mass.
    model.params["*g_LK"] = glk
    model.params["ALNThlmNet.ALNNode_0.ALNMassEXC_0.a"] = a
    model.params["*b"] = b
    model.params["*tauA"] = tauA
    model.params["*EXC*mu"] = ou_exc
    model.params["*INH*mu"] = ou_inh
    model.params["*ALNMass*input*sigma"] = ou_sigma
    model.params["duration"] = duration + t_spin_up
    model.params["dt"] = dt
    model.params["sampling_dt"] = sampling_dt
    model.params["backend"] = "numba"
    model.run()
    # Rates are scaled by 1000 — presumably model units -> Hz, matching the
    # "[Hz]" axis labels used in the plots below; verify against neurolib docs.
    results_df = pd.DataFrame(
        {
            "ALN-EXC": model.r_mean_EXC[0, :] * 1000.0,
            "ALN-INH": model.r_mean_INH[0, :] * 1000.0,
            "I_adapt": model.I_A_EXC[0, :],
            "TCR": model.r_mean_EXC[1, :] * 1000.0,
        },
        index=model.t,
    )
    results_df.index.name = "time"
    return results_df
def plot_single(df, ax, ax2legend=False):
    """Plot cortical E/I rates plus the thalamic TCR rate on a twin axis.

    Parameters
    ----------
    df : pandas.DataFrame
        Output of `simulate()`; must contain "ALN-EXC", "ALN-INH", "TCR".
    ax : matplotlib Axes
        Axes for the cortical traces; TCR goes on `ax.twinx()`.
    ax2legend : bool
        If True, label the secondary (TCR) y-axis.

    Returns
    -------
    tuple
        Line handles (inhibitory, excitatory, TCR), used by the callers to
        build one shared figure legend.
    """
    # Inhibitory drawn first (semi-transparent) so the excitatory trace
    # sits on top of it.
    (l1,) = ax.plot(df.index, df["ALN-INH"], color="C1", alpha=0.7)
    (l2,) = ax.plot(df.index, df["ALN-EXC"], color="C0")
    ax.set_ylim([-40, 130])
    sns.despine(trim=True, ax=ax)
    # TCR rates reach far higher values than the cortical ones, hence the
    # separate right-hand axis with its own limits/ticks.
    ax2 = ax.twinx()
    (l3,) = ax2.plot(df.index, df["TCR"], color="k", linewidth=2.5, alpha=0.5)
    ax2.set_ylim([-50, 1600.0])
    ax2.set_yticks([0.0, 400.0])
    if ax2legend:
        ax2.set_ylabel("$r_{TCR}$")
        ax2.yaxis.set_label_coords(1.1, 0.13)
    sns.despine(trim=True, ax=ax2)
    return l1, l2, l3
conns = [0.0, 0.025, 0.05, 0.1]
params = [
(2.8, 2.0, "Inside $\mathregular{{LC_{{aE}}}}$"),
(2.33, 2.0, r"Border DOWN $\times$ $\mathregular{{LC_{{aE}}}}$"),
(3.5, 2.0, r"Border $\mathregular{{LC_{{aE}}}}$ $\times$ UP"),
]
# noise-less
noise = 0.0
fig, axs = plt.subplots(
nrows=len(conns),
ncols=len(params),
sharex=True,
figsize=(20, 3 * len(conns)),
squeeze=False,
)
for j, (exc_inp, inh_inp, plot_name) in enumerate(params):
for i, conn in enumerate(conns):
ax = axs[i, j]
df = simulate(
conn=conn,
ou_exc=exc_inp,
ou_inh=inh_inp,
ou_sigma=noise,
tauA=1000.0,
glk=0.031,
a=0.0,
b=15.0,
)
l1, l2, l3 = plot_single(df.loc[5:20], ax=ax, ax2legend=(j == len(params) - 1))
if i == (len(conns) - 1):
ax.set_xlabel("time [sec]")
if j == 0:
ax.set_ylabel(
r"$N_{thal\to ctx}" f"={conn}$ \n\n $r_{{E}}$, $r_{{I}}$ [Hz]"
)
axs[0, j].set_title(plot_name)
plt.tight_layout()
fig.legend(
(l1, l2, l3),
("I", "E", "TCR"),
loc="upper center",
ncol=3,
bbox_to_anchor=(0.51, 0.01),
bbox_transform=fig.transFigure,
)
# to PDF due transparency
if SAVE_FIG:
plt.savefig(
f"../figs/aln_rec_spindle_{noise}.pdf",
transparent=True,
bbox_inches="tight",
)
# with noise: only right border is interesting
noise = 0.05
fig, axs = plt.subplots(
ncols=len(conns),
nrows=1,
figsize=(20, 3.5),
squeeze=False,
)
params = [
(3.25, 2.0, r"Border $\mathregular{{LC_{{aE}}}}$ $\times$ UP"),
]
for i, (exc_inp, inh_inp, plot_name) in enumerate(params):
for j, conn in enumerate(conns):
ax = axs[i, j]
df = simulate(
conn=conn,
ou_exc=exc_inp,
ou_inh=inh_inp,
ou_sigma=noise,
tauA=1000.0,
glk=0.031,
a=0.0,
b=15.0,
)
l1, l2, l3 = plot_single(df.loc[5:20], ax=ax, ax2legend=(j == len(conns) - 1))
ax.set_xlabel("time [sec]")
ax.set_title(r"$N_{thal\to ctx}" + f"={conn}$")
if j == 0:
ax.set_ylabel("$r_{E}$, $r_{I}$ [Hz]")
plt.tight_layout()
fig.legend(
(l1, l2, l3),
("I", "E", "TCR"),
loc="upper center",
ncol=3,
bbox_to_anchor=(0.51, 0.01),
bbox_transform=fig.transFigure,
)
# to PDF due transparency
if SAVE_FIG:
plt.savefig(
f"../figs/aln_rec_spindle_{noise}.pdf",
transparent=True,
bbox_inches="tight",
)
```
| github_jupyter |
## Contents
0. Import Libraries and Load Data
1. Data Preparation for PanelData Model
2. Basic Panel Model
- PooledOLS model
- RandomEffects model
- BetweenOLS model
3. Testing correlated effects
- Testing for Fixed Effects
- Testing for Time Effects
- First Differences
4. Comparison
- Comparing between modelBetween, modelRE and modelPooled models
    - Comparing between Robust, Entity and Entity-Time methods
5. Instruments as lags of order 1 and 2 of first differences
    - Compute the lags of order 1 and 2 of first differences
6. Linear Instrumental-Variables Regression
- 2SLS as OLS
- IV 2SLS
- Tests
- Sargan test: Testing the absence of correlation between Z and U
- Testing the correlation of Z and X_endog
        - Endogeneity testing using Durbin's and Wu-Hausman tests of exogeneity
- Augmented test for testing the exogeneity `log_fare`
- Instrumenting using two-stage least squares
- Homoskedasticity – Heteroskedasticity
- Breusch–Pagan test
- White test
7. GMM Estimation
8.1. Exogeneity test using the augmented regression approach
8.2. Testing Autocorrelation
9. Feasible Generalized Least Squares (GLS) and GLSA model
10. References
```
# Importning libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.animation as animation
import glob
from glob import iglob
import datetime as dt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
from linearmodels import PanelOLS, PooledOLS, BetweenOLS, RandomEffects, FirstDifferenceOLS
from statsmodels.stats.outliers_influence import variance_inflation_factor, OLSInfluence
%matplotlib inline
path = '../notebooks/final_database.csv'
df = pd.read_csv(path, decimal='.', sep=',')
df['quarter'] = pd.to_datetime(df.quarter).dt.to_period('Q-DEC')
df.sort_values(['citymarket1_id', 'citymarket2_id','quarter'], inplace=True)
df.head()
```
## Preparing the `PanelData`
- To use the data as `PanelData`, we need:
- to campute a dummies variable for each period (quarter, in our case),
- to identify the ID variable and the time variable, and then to set them in index,
- to sort the data to respect to the `ID`and the `period`.
- As the `Within` and the `First Difference` (respectively the `Second Difference`) estimators require at least 2 (respectively 4) observations per individual, we will delete the lines with only one, two or three observations in the dataset.
- To do so, we will first compute the frequency of each `city market` (here: the number of quarters per city market) and then keep only those that are present in the dataset `at least 4 times`.
```
variables = ['log_passengers','log_nsmiles','log_fare','nb_airline','log_income_capita','log_population',
'log_kjf_price','dum_dist' ,'dum_q1','dum_q2','dum_q3','dum_q4']
df['citymarket_id'] = df.citymarket1_id.astype(str)+'_'+df.citymarket2_id.astype(str)
df['quarter_index'] = (df.quarter.dt.year.astype(str)+df.quarter.dt.quarter.astype(str)).astype(int)
panel0_df = df[['citymarket_id','quarter_index', 'quarter']+variables].copy()
panel0_df.sort_values(['citymarket_id','quarter_index'], inplace=True)
print('panel0_df has {} observations and {} variables'.format(panel0_df.shape[0], panel0_df[variables].shape[1]))
# Reset the index in order to campute the number of quarter by city market
# panel0_df.reset_index(inplace=True)
# Compute the dummies variables of quarter
panel0_df['quarter'] = pd.Categorical(panel0_df.quarter)
panel0_df.head()
# Campute and save the number of quarter by city market
nb_cm = panel0_df[['citymarket_id', 'quarter']].groupby(['citymarket_id']).nunique()
nb_cm.drop('citymarket_id', axis=1, inplace=True)
# Reset the index and rename the columns in order to merge the two datasets
nb_cm.reset_index(inplace=True)
nb_cm.columns = ['citymarket_id','nb_citymarket']
# Merging and dropping the no needed rows
panel1_df = pd.merge(panel0_df, nb_cm, on=['citymarket_id'], how='inner')
panel1_df = panel1_df[panel1_df.nb_citymarket>=4]
panel1_df.drop('nb_citymarket', axis=1, inplace=True)
print("We delete {} city-markets(lines) which didn't present at least 4 times in a given querter.".format(panel0_df.shape[0]-panel1_df.shape[0]))
print("So now, we have '{}' obserations in our dataset which will be used to camput the first and second differences.\n".format(panel1_df.shape[0]))
print('We have {} uniques city-pair markets and {} periods on our dataset'.format(panel1_df.citymarket_id.nunique(),
panel1_df.quarter.nunique()))
# Assign the city-market ID a new variable name `ID`
iden = panel1_df[['citymarket_id', 'quarter']].groupby(['citymarket_id']).nunique()
iden['ID'] = range(1, iden.shape[0]+1)
iden.drop('citymarket_id', axis=1, inplace=True)
iden.reset_index(inplace=True)
iden = iden[['citymarket_id', 'ID']]
panel1_df = pd.merge(iden, panel0_df, on=['citymarket_id'], how='inner')
panel1_df.head()
panel1_df.citymarket_id.nunique(), panel1_df.citymarket_id.count()
panel1_df.info()
print('Number of city-market:', panel1_df.citymarket_id.nunique(),
'\nNumber of quarter:', panel1_df.quarter.nunique())
```
## Basic regression
- First, run the PooledOLS as classical OLS regression to check the structure of the data .
- The log passengers is modeled using all independent variables and time dummies.
- `Note` that the dummies of quarters will not used at the same time with the dummies of times
https://bashtage.github.io/linearmodels/devel/panel/examples/examples.html
```
# the index in order to campute the number of quarter by city market
panel1_df.set_index(['citymarket_id','quarter_index'], inplace=True)
```
### Parameters
- `time_effects`: flag whether to include entity (fixed) effects in the model, if `True`
- `time_effects`: flag whether to include time effects in the model, if `True`
- `cov_type`:
- if `homoskedastic` or `unadjusted`: assume residual are homoskedastic
- if `heteroskedastic` or `robust`: control for heteroskedasticity using `White’s estimator`
- White’s robust covariance adds some robustness against certain types of specification issues. This estimator should not be used when including fixed effects (entity effects) because, no longer robust.
### 1. PooledOLS model
```
# Identifying the regressors. Note that the `quarter` is the time dummies
regressors = ['log_nsmiles','log_fare','nb_airline','log_income_capita','log_population','log_kjf_price','dum_dist','quarter']
modelPooled = PanelOLS(panel1_df.log_passengers, panel1_df[regressors],
entity_effects=False, time_effects=False, other_effects=None)
modelPooled = modelPooled.fit(cov_type='robust')
print(modelPooled)
modelPooled.f_pooled
modelPooled.entity_info
modelPooled.f_statistic
modelPooled.f_statistic_robust
modelPooled.variance_decomposition
"""modelRE.f_statistic
modelRE.f_statistic_robust
modelRE.variance_decomposition"""
```
### 2. RandomEffects model
```
# Identifying the regressors. Note that the `quarter` is the time dummies
regressors = ['log_nsmiles','log_fare','nb_airline','log_income_capita','log_population','log_kjf_price','dum_dist', 'quarter']
modelRE = RandomEffects(panel1_df.log_passengers, panel1_df[regressors])
modelRE = modelRE.fit(cov_type='robust')
print(modelRE)
modelRE.variance_decomposition
modelRE.theta.head()
```
### 2. BetweenOLS model
The quarter dummies are dropped since the averaging removes differences due to the quarter. These results are broadly similar to the previous models.
```
# Identifying the regressors. Note that the `quarter` is the time dummies
panel1_df['const'] = 1
regressors = ['const','log_nsmiles','log_fare','nb_airline','log_income_capita','log_population','log_kjf_price','dum_dist'] # , 'quarter'
modelBetween = BetweenOLS(panel1_df.log_passengers, panel1_df[regressors])
modelBetween = modelBetween.fit(cov_type='robust')
print(modelBetween)
```
## Testing correlated effects
> When effects are correlated with the regressors the RE and BE estimators are not consistent. The usual solution is to use Fixed Effects which are available in PanelOLS. Fixed effects are called entity_effects when applied to entities and time_effects when applied to the time dimension:
### 1. Testing for Fixed Effects
- Entity effects can be added using `entity_effects=True`.
- Time-invariant (`dum_dist`) variable is excluded when using entity effects since it will all be 0.
- Since the estimator is not robust, we set `cov_type='clustered'.
```
regressors = ['log_nsmiles','log_fare','nb_airline','log_income_capita','log_population','log_kjf_price', 'quarter']
modelFE = PanelOLS(panel1_df.log_passengers, panel1_df[regressors],
entity_effects=True, time_effects=False, other_effects=None)
modelFE = modelFE.fit(cov_type='clustered', cluster_entity=True)
print(modelFE)
```
### 2. Testing for Time Effects
- Time effect can be added using `time_effects=True`.
- Here, when we include or exclude the constant, we have the same results.
```
regressors = ['const','log_nsmiles','log_fare','nb_airline','log_income_capita','log_population']
modelTE = PanelOLS(panel1_df.log_passengers, panel1_df[regressors],
entity_effects=False, time_effects=True, other_effects=None)
modelTE = modelTE.fit(cov_type='clustered', cluster_entity=True, cluster_time=True)
print(modelTE)
```
### 3. First Differences
> First differencing is an alternative to using fixed effects when there might be correlation. When using first differences, time-invariant variables must be excluded.
```
# First-difference estimator: time-invariant regressors (e.g. dum_dist)
# are excluded because differencing removes them.
regressors = ['log_nsmiles','log_fare','nb_airline','log_income_capita','log_population']
modelFD = FirstDifferenceOLS(panel1_df.log_passengers, panel1_df[regressors])
modelFD = modelFD.fit(cov_type='clustered', cluster_entity=True)
# Bug fix: this cell previously printed `modelTE` (the time-effects model
# from the cell above); the first-difference results are what this section
# is meant to show.
print(modelFD)
```
## Comparing between modelBetween, modelRE and modelPooled models
```
from linearmodels.panel import compare
modelCompare = compare({'PooledOLS':modelPooled, 'Between':modelBetween, 'RandomEffects':modelRE})
print(modelCompare)
```
### Comparing between Robust, Entity and Entity-Time mothods
```
regressors = ['const','log_nsmiles','log_fare','nb_airline','log_income_capita','log_population']
modelComp = PanelOLS(panel1_df.log_passengers, panel1_df[regressors])
robust = modelComp.fit(cov_type='robust')
clust_entity = modelComp.fit(cov_type='clustered', cluster_entity=True)
clust_entity_time = modelComp.fit(cov_type='clustered', cluster_entity=True, cluster_time=True)
from collections import OrderedDict
results = OrderedDict()
results['Robust'] = robust
results['Entity'] = clust_entity
results['Entity-Time'] = clust_entity_time
print(compare(results))
# Reset the index in order to compute the Ìnstrumentals variables`
panel1_df.reset_index(inplace=True)
panel1_df.head()
```
### Instruments as lags of order 1 and 2 of first differences
- Because we want to compute the `first difference` and the `second difference` of the variables, we first need to compute the `lagged` values of these variables.
- To do that, we first create a function named `lag_by_individual` and use the `shift()` python function inside this new one.
- The `lag_by_individual` function helps us to identify the first and the last observation of each individual (`city market`) as well as strictly successive observations. In the lagged variables, the first observation for each `city market` will be `"NaN"` (missing value).
- The first difference is computed using the following formula: `difference(t) = observation(i,t) - observation(i,t-1)`. For example, for a given `city market`, we calculate the difference between the observation of `quarter q` and the observation of `quarter q-1`.
#### Let's test our `lag_by_individual`function with some observations before applying it in our data
#### Example of lagged variables
Because we want to be sure if our `lag_by_individual` function work well, we generate a small DataFrame and the test it before using our big table.
```
# Create a random data
np.random.seed(0) # ensures the same set of random numbers are generated
date = ['2019-01-01']*3 + ['2019-01-02']*3 + ['2019-01-03']*3+['2019-01-04']*3
var1, var2 = np.random.randn(12), np.random.randn(12)*20
group = ["group1", "group2", "group3"]*4 # to assign the groups for the multiple group case
DataFrame = pd.DataFrame({"quarter_index": date, "citymarket_id":group, "var1": var1, "var2": var2}) # many vars, many groups
grouped_df = DataFrame.groupby(["citymarket_id"])
# The function
def lag_by_individual(key, value_df):
    """Lag one group's rows by a single period.

    Assigns the group column `citymarket_id` to `key`, orders the rows
    chronologically by `quarter_index`, indexes by it, and shifts every
    column down one row — so each row carries the previous period's values
    and the earliest period becomes NaN.

    Reference:
    https://towardsdatascience.com/timeseries-data-munging-lagging-variables-that-are-distributed-across-multiple-groups-86e0a038460c
    """
    ordered = value_df.assign(citymarket_id=key).sort_values(
        by=["quarter_index"], ascending=True
    )
    return ordered.set_index(["quarter_index"]).shift(1)
# Applied the function
lag_values = [lag_by_individual(g, grouped_df.get_group(g)) for g in grouped_df.groups.keys()]
lag_df = pd.concat(lag_values, axis=0).reset_index()
lag_df.loc[(lag_df.citymarket_id.isna() != True), 'id'] = 1 # This variable help to campute the diffenrence only when the obs are strictly succesive
lag_df.loc[(lag_df.citymarket_id.isna() == True), 'citymarket_id'] = lag_df.citymarket_id.shift(-1) # deshift the varaiable
lag_df.set_index(['quarter_index','citymarket_id'], inplace=True)
lag_df.columns = lag_df.columns.values+'_lag1'
dif = pd.merge(DataFrame,lag_df, on = ['quarter_index','citymarket_id'], how='inner').sort_values(['citymarket_id','quarter_index'])
dif.loc[(dif.id_lag1.isna() != True), 'var1_dif1'] = dif.var1-dif.var1.shift()
dif.loc[((dif.id_lag1.isna() != True) & (dif.var1_dif1.isna() != True)), 'var1_dif2'] = dif.var1_dif1.shift()
dif.loc[((dif.id_lag1.isna() != True) & (dif.var1_dif2.shift().isna() != True)), 'var1_dif3'] = dif.var1_dif1.shift(2)
dif.loc[((dif.id_lag1.isna() != True) &
(dif.var1_dif1.isna() != True) & (dif.var1_dif2.shift().isna() != True)), 'var1_dif3'] = dif.var1_dif1.shift(2)
dif.tail(20)
grouped_df = panel1_df.groupby(["citymarket_id"])
def lag_by_individual(key, value_df):
    """Return the one-period lag of a single city market's rows.

    The group column `citymarket_id` is (re)assigned to `key`; rows are
    sorted chronologically by `quarter`, indexed by it, and every column is
    shifted down one row, so the first observation of each city market comes
    out as NaN.

    Refference:
    https://towardsdatascience.com/timeseries-data-munging-lagging-variables-that-are-distributed-across-multiple-groups-86e0a038460c
    """
    chronological = value_df.assign(citymarket_id=key).sort_values(
        by=["quarter"], ascending=True
    )
    return chronological.set_index(["quarter"]).shift(1)
# Apply the function
lag_values = [lag_by_individual(g, grouped_df.get_group(g)) for g in grouped_df.groups.keys()]
lag_df = pd.concat(lag_values, axis=0).reset_index()
lag_df.loc[(lag_df.citymarket_id.isna() != True), 'id'] = 1 # This variable help to campute the diffenrence only when the obs are strictly succesive
lag_df.loc[(lag_df.citymarket_id.isna() == True), 'citymarket_id'] = lag_df.citymarket_id.shift(-1) # deshift the varaiable
lag_df.set_index(['quarter','citymarket_id'], inplace=True)
lag_df.columns = lag_df.columns.values+'_lag1'
lag_df = lag_df[['id_lag1']]
final_df = pd.concat([panel1_df.set_index(['quarter','citymarket_id']), lag_df],axis=1).reset_index()
final_df.head()
```
### Compute the lags of order 1 and 2 of first differences
Note that we also create a lagged variable of `log_passengers` without first-differencing, named `log_passengers_lag1`. This variable will be used in the dynamic model.
```
# Lag of the level itself (used by the dynamic model) and its first difference.
# Every assignment is guarded by `id_lag1` being non-missing, so differences are
# only taken between strictly successive observations of the same market.
final_df.loc[(final_df.id_lag1.isna() != True), 'log_passengers_lag1'] = final_df.log_passengers.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'log_passengers_dif1'] = final_df.log_passengers-final_df.log_passengers.shift()
# Lagged first differences (orders 1 and 2 of the lag); the extra guards check
# the previous difference exists before shifting it, so market gaps do not leak.
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_passengers_dif1.isna() != True)), 'log_passengers_dif2'] = final_df.log_passengers_dif1.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_passengers_dif1.isna() != True) &
              (final_df.log_passengers_dif1.shift().isna() != True)), 'log_passengers_dif3'] = final_df.log_passengers_dif1.shift(2)
final_df.loc[(final_df.id_lag1.isna() != True), 'log_nsmiles_dif1'] = final_df.log_nsmiles-final_df.log_nsmiles.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'log_nsmiles_dif2'] = final_df.log_nsmiles_dif1.shift()
# For a given individual (city market) the distance is constant across periods,
# so the filters below were considered but left disabled.
#final_df = final_df.loc[(final_df.id_lag1.isna() != True) & (final_df.log_nsmiles == final_df.log_nsmiles.shift())]
#final_df = final_df.loc[(final_df.id_lag1.isna() != True) & (final_df.log_nsmiles == final_df.log_nsmiles.shift(2))]
#final_df = final_df.loc[(final_df.id_lag1.isna() != True) & (final_df.log_nsmiles == final_df.log_nsmiles.shift(3))]
#final_df = final_df.loc[final_df.log_nsmiles_dif2==0]
final_df.loc[(final_df.id_lag1.isna() != True), 'log_fare_dif1'] = final_df.log_fare-final_df.log_fare.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'log_fare_dif2'] = final_df.log_fare_dif1.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'nb_airline_dif1'] = final_df.nb_airline-final_df.nb_airline.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'nb_airline_dif2'] = final_df.nb_airline_dif1.shift()
final_df.loc[(final_df.id_lag1.isna() != True), 'log_income_capita_dif1'] = final_df.log_income_capita-final_df.log_income_capita.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_income_capita_dif1.isna() != True)), 'log_income_capita_dif2'] = final_df.log_income_capita_dif1.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_income_capita_dif1.isna() != True) &
              (final_df.log_income_capita_dif1.shift().isna() != True)), 'log_income_capita_dif3'] = final_df.log_income_capita_dif1.shift(2)
final_df.loc[(final_df.id_lag1.isna() != True), 'log_population_dif1'] = final_df.log_population-final_df.log_population.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_population_dif1.isna() != True)), 'log_population_dif2'] = final_df.log_population_dif1.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_population_dif1.isna() != True) &
              (final_df.log_population_dif1.shift().isna() != True)), 'log_population_dif3'] = final_df.log_population_dif1.shift(2)
final_df.loc[(final_df.id_lag1.isna() != True), 'log_kjf_dif1'] = final_df.log_kjf_price-final_df.log_kjf_price.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_kjf_dif1.isna() != True)), 'log_kjf_dif2'] = final_df.log_kjf_dif1.shift()
final_df.loc[((final_df.id_lag1.isna() != True) &
              (final_df.log_kjf_dif1.isna() != True) &
              (final_df.log_kjf_dif1.shift().isna() != True)), 'log_kjf_dif3'] = final_df.log_kjf_dif1.shift(2)
# Quick visual check of the created columns.
final_df[['quarter','citymarket_id','log_passengers','log_passengers_lag1','log_passengers_dif1','log_passengers_dif2',
          'log_fare','log_fare_dif1','log_fare_dif2','log_income_capita_dif1','log_income_capita_dif3','log_population_dif3']].head()
# Eliminate observations with missing values (rows without first and second differences).
final_df.dropna(axis=0, how='any', inplace=True)
print("We delete '{}' observations because their have not first or second differences values.".format(panel1_df.shape[0]-final_df.shape[0]))
print("Now, we have '{}' obserations in our dataset after camputing the first and second differences.\n".format(final_df.shape[0]))
final_df[['quarter','citymarket_id','log_passengers','log_passengers_lag1','log_passengers_dif1','log_passengers_dif2',
          'log_fare','log_fare_dif1','log_fare_dif2','log_income_capita_dif1']].head()
print('We have {} uniques city-pair markets and {} periods on our dataset'.format(final_df.citymarket_id.nunique(),
                                                                                 final_df.quarter.nunique()))
"""final_df.sort_values(by=['ID','quarter'], inplace=True)
# Exportation
path = '../notebooks/final_panel_df.csv'
final_df.to_csv(path, index=False)"""
```
### Here, we compute the dummies of the times variables manually
```
# Build the time dummies manually: copy quarter_index and dummy-encode the copy,
# so the original quarter_index column survives for sorting and indexing.
final_df['get_dum_quarter'] = final_df['quarter_index']
dum_period = pd.get_dummies(final_df.quarter_index, prefix='dum', columns=['quarter_index']).columns.values.tolist()  # names of the dummy columns, reused below
panel_df = pd.get_dummies(final_df, prefix='dum', columns=['get_dum_quarter'])
panel_df['quarter'] = pd.Categorical(panel_df.quarter_index)
# (entity, time) MultiIndex as expected by linearmodels panel estimators.
panel_df.sort_values(['citymarket_id', 'quarter_index'], inplace=True)
panel_df.set_index(['citymarket_id', 'quarter_index'], inplace=True)
panel_df.head()
# Show the columns of the time dummies
np.array(dum_period)
print('Number of city-market:', panel_df.ID.nunique(),
      '\nNumber of quarter:', panel_df.quarter.nunique())
panel_df.shape
```
## Linear Instrumental-Variables Regression
[Reference](https://bashtage.github.io/linearmodels/doc/iv/examples/advanced-examples.html)
```
from linearmodels import IV2SLS, IVLIML, IVGMM, IVGMMCUE
```
## 1. IV 2SLS as OLS
For running a `2SLS` as `OLS` estimator of parameters in PanelData, we call the `IV2SLS` using `None` for the `endogenous` and the `instruments`.
```
# 2SLS reduced to OLS: passing None for both endog and instruments makes IV2SLS
# estimate a plain OLS regression (baseline for the IV comparisons below).
controls = ['const','log_passengers_lag1','log_nsmiles','log_fare','log_income_capita','log_population',
            'nb_airline','log_kjf_price','dum_dist']
# dum_period[:-2] drops the last two time dummies — presumably to avoid the
# dummy-variable trap with the constant; TODO confirm.
ivolsmodel = IV2SLS(panel_df.log_passengers,
                    panel_df[controls + dum_period[:-2]],
                    None, None).fit()
print(ivolsmodel.summary)
```
### 2. IV 2SLS using `log_income_capita_dif1` and `log_fare` as endogenous variables
```
"""
instruements = ['log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
'log_income_capita_dif2','log_population_dif2','nb_airline_dif2',
'log_income_capita_dif3','log_population_dif3']"""
controls = ['const','log_nsmiles','log_income_capita','log_population',
'nb_airline','log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
iv2LSmodel = IV2SLS(panel_df['log_passengers'],
panel_df[controls + dum_period[:-2]],
panel_df[['log_fare','log_passengers_lag1']],
panel_df[instruements]).fit()
print(iv2LSmodel.summary)
```
## 3. Tests
### 3.1. Testing the absence of correlation between Z and U
- We estimate two models separately:
- `iv2LSmodel1` when log_passengers is considered as endogenous variable
- `iv2LSmodel2` when log_fare is considered as endogenous variable
- For each model, we save the residuals (see the `Sargan` test part) and test
> (In essence, when the regressor and error are correlated, the parameter is not identiÖed. The presence of an instrument solves the identiÖcation problem.)
> Instruments are correlated with X, but uncorrelated with the model error term by assumption or by construction.
```
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import scipy.stats as st
# Fit one IV model per endogenous variable, so the Sargan tests below can be
# computed separately on each model's residuals.
controls = ['const','log_nsmiles','log_income_capita','log_population',
            'nb_airline','log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
# Model 1: only log_passengers_lag1 endogenous.
iv2LSmodel1 = IV2SLS(panel_df['log_passengers'],
                     panel_df[controls+dum_period[:-2]],
                     panel_df[['log_passengers_lag1']], panel_df[instruements]).fit()
# Model 2: only log_fare endogenous.
iv2LSmodel2 = IV2SLS(panel_df['log_passengers'],
                     panel_df[controls+dum_period[:-2]],
                     panel_df[['log_fare']], panel_df[instruements]).fit()
```
#### 3.1.1. Compute the Q-Sargan for testing the absence of correlation between Z and U
- Store the residuals in a variable named `iv_resids`
- Estimate a regression of the estimated residuals on all instruments
- Compute the R-square of this last regression
- Get the number of observations and compute the Q-Sargan
- Compare this computed value of the Sargan statistic with the Sargan value provided directly by the model
```
# Compute the Q-Sargan in model 1: log_passengers_lag1 treated as endogenous.
# Recipe: regress the IV residuals on controls + instruments, then Q = n * R^2.
panel_df['iv_resids'] = iv2LSmodel1.resids.values
cor_sargan = sm.OLS(panel_df.iv_resids, panel_df[controls+instruements]).fit()
r_pred = cor_sargan.predict(panel_df[controls+instruements])
r_square = r2_score(panel_df.iv_resids, r_pred)
degree_freedom = cor_sargan.df_model
nobs = cor_sargan.nobs
q_sargan = nobs*r_square
print('Q-Sargan:', q_sargan)
# Same computation for model 2: log_fare treated as endogenous.
panel_df['iv_resids'] = iv2LSmodel2.resids.values
cor_sargan = sm.OLS(panel_df.iv_resids, panel_df[controls+instruements]).fit()
r_pred = cor_sargan.predict(panel_df[controls+instruements])
r_square = r2_score(panel_df.iv_resids, r_pred)
degree_freedom = cor_sargan.df_model
nobs = cor_sargan.nobs
q_sargan = nobs*r_square
print('Q-Sargan:', q_sargan)
```
The critical value of the `Chi-square` with p-(k+1) degrees of freedom (`8`) is `21.955`. Hence, we reject the null hypothesis.
#### 3.1.2. We can also get this sargan statistic test from the IV model (as below).
```
# Sargan overidentification statistics reported directly by each IV model.
iv2LSmodel1.sargan
iv2LSmodel2.sargan
```
> Wooldridge’s regression-based test of exogeneity is robust to heteroskedasticity since it inherits the covariance estimator from the model. Here there is little difference.
Wooldridge’s score test is an alternative to the regression test, although it usually has slightly less power since it is an LM rather than a Wald type test.
```
# Alternative exogeneity / overidentification diagnostics from linearmodels.
iv2LSmodel1.wooldridge_regression  # regression-based test (heteroskedasticity-robust)
iv2LSmodel2.wooldridge_regression
iv2LSmodel1.wooldridge_score       # LM score variant of the regression test
iv2LSmodel2.wooldridge_score
iv2LSmodel1.wooldridge_overid      # overidentification variant
iv2LSmodel2.wooldridge_overid
iv2LSmodel1.basmann_f              # Basmann's F statistic
iv2LSmodel2.basmann_f
```
### 3.2. Testing the correlation of Z and X_endog
- We estimate two different OLS models:
- `cor_z_fare` when log_fare is the response variable
- `cor_z_pass` when log_passengers_lag1 is the response variable
- For each model, the explanatories includes the `controls` and the `instrumentals` variables
- The idea is to test if the coefficients of the `instruments` are null.
- H0: the coefficients of the `instruments` are equal to zero
The F-test and the Wald test are used to test whether a variable has no effect. Note that the F-test is a special case of the Wald test that always uses the F distribution.
```
# Instrument-relevance test: jointly test that all instrument coefficients are
# zero in a regression of each endogenous variable on controls + instruments.
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
# FIX: 'log_fare_dif1' was missing from the joint restriction even though it is
# in the instrument list above; it is now included so ALL instruments are tested.
H0 = '(log_nsmiles_dif1=log_income_capita_dif1=log_population_dif1=nb_airline_dif1=log_fare_dif1=log_fare_dif2=log_passengers_dif2=log_nsmiles_dif2=log_income_capita_dif2=log_population_dif2=nb_airline_dif2=0)'
# Using the f_test from the OLS results
cor_z_fare = sm.OLS(panel_df[['log_fare']], panel_df[controls+dum_period[:-2]+instruements]).fit()
cor_z_pass = sm.OLS(panel_df[['log_passengers_lag1']], panel_df[controls+dum_period[:-2]+instruements]).fit()
print('test between Z and fare:\n', cor_z_fare.f_test(H0))
print()
print('test between Z and lag_passenger:\n', cor_z_pass.f_test(H0))
# Using the Wald test from the PanelOLS results
cor_z_fare = PanelOLS(panel_df.log_fare, panel_df[controls+dum_period[:-2]+instruements]).fit()
cor_z_pass = PanelOLS(panel_df.log_passengers_lag1, panel_df[controls+dum_period[:-2]+instruements]).fit()
print('testing correlation between Z and fare')
cor_z_fare.wald_test(formula=H0)
print('testing correlation between Z and lag_passenger')
cor_z_pass.wald_test(formula=H0)
```
#### The `Statistic` of all these previous tests allows us to reject the null hypothesis. The coefficients of the `Instruments` are not equal to zero. In other words, the **`instruments are indeed strong and relevant`**.
### 3.3. Endogeneity testing using `Durbin's` and `Wu-Hausman` test of exogeneity
> 1. The Durbin test is a classic of endogeneity which compares OLS estimates with 2SLS and exploits the fact that OLS estimates will be relatively efficient. Durbin’s test is not robust to heteroskedasticity.
> 2. The Wu-Hausman test is a variant of the Durbin test that uses a slightly different form.
```
# Durbin and Wu-Hausman exogeneity tests (OLS vs 2SLS comparison, see notes above).
iv2LSmodel1.durbin()
iv2LSmodel2.durbin()
iv2LSmodel1.wu_hausman()
iv2LSmodel2.wu_hausman()
# Model F-statistics for each IV fit.
iv2LSmodel1.f_statistic
iv2LSmodel2.f_statistic
```
### 3.4. Augmented test for testing the exogeneity `log_fare` and `log_passengers_lag1`
- Using `F-test` for a joint linear hypothesis and `Wald test`: testing if a variable has not effect
- [WaldTestStatistic](https://bashtage.github.io/linearmodels/panel/results.html): hypothesis test examines whether 𝐻0: 𝐶𝜃=𝑣 where the matrix C is restriction and v is value. The test statistic has a 𝜒2𝑞 distribution where q is the number of rows in C. See the [Source code for linearmodels.panel.results](https://bashtage.github.io/linearmodels/_modules/linearmodels/panel/results.html#PanelEffectsResults)
```
# Augmented (control-function) test for the exogeneity of log_fare:
# 1) regress log_fare on controls + time dummies and keep its residuals;
# 2) add these residuals to the structural regression and test their coefficient.
aug_residus = PanelOLS(panel_df.log_fare, panel_df[controls + dum_period[:-2]]).fit()
panel_df['fare_resids'] = aug_residus.resids.values
aug_wald = sm.OLS(panel_df.log_passengers, panel_df[['log_fare','fare_resids']+controls]).fit()
H0_formula = '(fare_resids = 0)'  # more variables could be tested jointly, e.g. 'x2 = x3 = 0'
aug_wald.f_test(H0_formula)
aug_wald.summary()
# Same augmented test for the exogeneity of log_passengers_lag1.
aug_residus = PanelOLS(panel_df.log_passengers_lag1, panel_df[controls + dum_period[:-2]]).fit()
panel_df['pass_lag_resids'] = aug_residus.resids.values
aug_wald = sm.OLS(panel_df.log_passengers, panel_df[['log_passengers_lag1','pass_lag_resids']+controls]).fit()
H0_formula = '(pass_lag_resids = 0)'  # more variables could be tested jointly, e.g. 'x2 = x3 = 0'
aug_wald.f_test(H0_formula)
aug_wald.summary()
```
### 4. Instrumenting using two-stage least squares (IV method)
> - endog is the dependent variable, y
- exog is the x matrix that has the endogenous information in it. Include the endogenous variables in it.
instrument is the z matrix. Include all the variables that are not endogenous and replace the endogenous variables from the exog matrix (above) with what ever instruments you choose for them.
- First stage: we regress the endogenous variable (`log_fare`, `log_passengers_lag1` respectively) on all other regressors and all the instruments and save the fitted values series.
- Second Stage: We regress the previous replacing, `log_fare`, by `log_fare_hat` (and `log_passengers_lag1` by `pass_lag_hat` respectively)
```
# Manual two-stage least squares, one endogenous variable at a time.
controls = ['const','log_passengers_lag1','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
# First stage for log_fare: regress it on all exogenous regressors + instruments.
iv_1stage1 = PanelOLS(panel_df[['log_fare']], panel_df[controls+instruements+dum_period[:-2]]).fit()
# Fitted values replace the endogenous regressor in the second stage.
panel_df['fare_hat'] = iv_1stage1.predict()
# Second stage: structural equation with fare_hat in place of log_fare.
iv_2stage1 = PanelOLS(panel_df[['log_passengers']], panel_df[['fare_hat']+controls+dum_period[:-2]]).fit()
#print(iv_2stage1.summary)
controls = ['const','log_fare','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
# First stage for log_passengers_lag1 (log_fare moved back into the controls).
iv_1stage2 = PanelOLS(panel_df[['log_passengers_lag1']], panel_df[controls+instruements+dum_period[:-2]]).fit()
panel_df['pass_lag_hat'] = iv_1stage2.predict()
# Second stage with pass_lag_hat in place of log_passengers_lag1.
iv_2stage2 = PanelOLS(panel_df[['log_passengers']], panel_df[['pass_lag_hat']+controls+dum_period[:-2]]).fit()
#print(iv_2stage2.summary)
# Second stage using both fitted endogenous regressors at once.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
iv_2stage = PanelOLS(panel_df[['log_passengers']], panel_df[['fare_hat','pass_lag_hat']+controls+dum_period[:-2]]).fit()
print(iv_2stage.summary)
```
As you can see, we obtain the same results using either the IV or the `Two Stage Least Squares` method.
==> Our instruments are valid.
## [Homoskedasticity](https://en.wikipedia.org/wiki/Homoscedasticity) - [Heteroskedasticity](https://en.wikipedia.org/wiki/Heteroscedasticity) test
The homoscedasticity hypothesis implies that the variance of the errors are equal:
\begin{equation*} 𝑉(𝜀|𝑋) = 𝜎^2𝐼 \end{equation*}
\begin{equation*} H_0: \sigma^2_i = \sigma^2 \end{equation*}
```
from statsmodels.stats.diagnostic import het_breuschpagan
from statsmodels.stats.diagnostic import het_white
# Refit the 2SLS model whose residuals are tested for heteroskedasticity below.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
iv2LSmodel = IV2SLS(panel_df['log_passengers'],
                    panel_df[controls+dum_period[:-2]],
                    panel_df[['log_fare','log_passengers_lag1']], panel_df[instruements]).fit()
```
### Breusch–Pagan test
$y$ = $\beta_0+\beta_1x+µ$
$\hat{µ}^2$ = $\rho_0+\rho_1x+𝑣$
- Breusch–Pagan test using `python library`
- Breusch–Pagan test computed manually (using two methods)
```
# Breusch-Pagan test via statsmodels (LM stat, LM p-value, F stat, F p-value).
bp_test = het_breuschpagan(iv2LSmodel.resids, iv2LSmodel.model.exog.original)
print('Lagrange multiplier statistic: {} \nP_value: {}\nf-value: {} \nfP_value: {}'.format(bp_test[0], bp_test[1],
bp_test[2], bp_test[3]))
"""
https://en.wikipedia.org/wiki/Breusch–Pagan_test
If the test statistic has a p-value below an appropriate threshold (e.g. p < 0.05)
then the null hypothesis of homoskedasticity is rejected and heteroskedasticity assumed.
"""
# Manual Breusch-Pagan: regress the squared IV residuals on the regressors.
panel_df['iv2_resids2'] = (iv2LSmodel.resids.values)**2
# FIX: the auxiliary regression result used to be stored in a variable named
# `het_breuschpagan`, shadowing the statsmodels function imported above and
# breaking any re-run of this cell. Renamed to `bp_aux`.
bp_aux = PanelOLS(panel_df.iv2_resids2, panel_df[controls+dum_period[:-2]]).fit()
fval = bp_aux.f_statistic
fpval = bp_aux.pvalues
if round(fval.pval,3) < 0.05:
    BreuschPagan_H0 = "We rejected H0: the null hypothesis of homoskedasticity. So, we have `heteroskedasticity`."
else:
    BreuschPagan_H0 = "We don't rejected H0: the null hypothesis of homoskedasticity"
print(fval)
print()
print(BreuschPagan_H0)
# Second manual variant: LM statistic = nobs * R^2 of the auxiliary regression.
het_bp = PanelOLS(panel_df.iv2_resids2, panel_df[controls+dum_period[:-2]]).fit()
het_bp_pred = het_bp.predict(panel_df[controls+dum_period[:-2]])
r_square = r2_score(panel_df.iv2_resids2, het_bp_pred)
m = len(controls+dum_period[:-2])  # number of regressors (LM degrees of freedom)
nobs = het_bp.nobs
q_het_bp = nobs*r_square
print(q_het_bp)
```
The critical value of the `Chi-square` with m (number of regressors) degrees of freedom (`37`) is `61.581`. Hence, we reject the null hypothesis.
### White test
$\hat{µ}^2$ = $δ_{0}+δ_{1}x_{1}+…+δ_{k}x_{k}+λ_{1}x_{1}^{2}+…+λ_{k}x_{k}^{2}+φ_{1}x_{1}x_{2}+…+φ_{k-1}x_{k-1}x_{k}+ν$
> According to [Takashi Yamano](http://www3.grips.ac.jp/~yamanota/Lecture%20Note%208%20to%2010%202SLS%20&%20others.pdf) (P.22), "because $\hat{y}$ includes all independent variables, this test is equivalent of conducting the following test":
$\hat{µ}^2$ = $δ_{0}+δ_{1}\hat{y}+δ_{1}\hat{y}^{2}+ν$
- White test using `python library`
- White test computed manually (using $\hat{µ}^2$ = $δ_{0}+δ_{1}\hat{y}+δ_{1}\hat{y}^{2}+ν$ equation)
```
#name = ['Lagrange multiplier statistic', 'P_value','f-value','fP_value']
# White test via statsmodels.
white_test = het_white(iv2LSmodel.resids, iv2LSmodel.model.exog.original)
print('Lagrange multiplier statistic: {} \nP_value: {}\nf-value: {} \nfP_value: {}'.format(white_test[0], white_test[1],
white_test[2], white_test[3]))
# Method 1 # https://www.dummies.com/education/economics/econometrics/test-for-heteroskedasticity-with-the-white-test/
# Simplified White: regress the squared residuals on y_hat and y_hat^2.
y_hat, y_hat_2 = iv2LSmodel.fitted_values, iv2LSmodel.fitted_values**2
square_resids = (iv2LSmodel.resids)**2
iv_hat = pd.concat([y_hat, y_hat_2,square_resids], axis=1)
iv_hat.columns = ['y_hat','y_hat_2','resids2']
# FIX: the result used to be stored in `het_white`, shadowing the statsmodels
# function imported above; renamed to `white_aux` so the cell can be re-run.
white_aux = PanelOLS(iv_hat.resids2, iv_hat[['y_hat','y_hat_2']]).fit()
fval = white_aux.f_statistic
fpval = white_aux.pvalues
if round(fval.pval,3) < 0.05:
    White_H0 = "We rejected H0: the null hypothesis of homoskedasticity, so we have `heteroskedasticity` in our model."
else:
    White_H0 = "We don't rejected H0: the null hypothesis of homoskedasticity"
print(fval)
print()
print(White_H0)
# Refit the 2SLS model and plot predicted vs. true values plus residual density.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
iv2LSmodel = IV2SLS(panel_df['log_passengers'],
                    panel_df[controls+dum_period[:-2]],
                    panel_df[['log_fare','log_passengers_lag1']], panel_df[instruements]).fit()
fig, ax = plt.subplots(figsize=(12,8))
a = plt.axes(aspect='equal')
plt.scatter(panel_df.log_passengers.values, iv2LSmodel.predict().values, alpha=.007, c='b')
plt.xlabel('True Values [log_passengers]')
plt.ylabel('IV Predictions [log_passengers]')
lims = [panel_df.log_passengers.min(), panel_df.log_passengers.max()]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims, c='r')  # 45-degree reference line
fig, ax = plt.subplots(figsize=(12, 8))
sns.distplot(iv2LSmodel.resids, bins=200, hist=True, kde='gaussian', color='b', ax=ax, norm_hist=True)
sns.distplot(iv2LSmodel.resids, bins=200, hist=False, kde='kernel', color='r', ax=ax, norm_hist=True)
ax.set_title("IV Residuals Plot", fontsize=27)
ax.set_xlim(-1.5,1.5)
ax.set_xlabel('IV Residuals', fontsize=20)
# FIX: plt.show is a function object; without parentheses it was never called.
plt.show()
```
## GMM Estimation
> GMM estimation can be more efficient than 2SLS when there are more than one instrument. By default, 2-step efficient GMM is used (assuming the weighting matrix is correctly specified). It is possible to iterate until convergence using the optional keyword input iter_limit, which is naturally 2 by default. Generally, GMM-CUE would be preferred to using multiple iterations of standard GMM. Source: [linearmodels 4.5](https://bashtage.github.io/linearmodels/doc/iv/examples/advanced-examples.html)
### Parameters
- According to the [linearmodels 4.5](https://bashtage.github.io/linearmodels/doc/iv/examples/advanced-examples.html) "available GMM weight functions are:
- `unadjusted`, 'homoskedastic' - Assumes moment conditions are homoskedastic
- `robust`, 'heteroskedastic' - Allows for heteroskedasticity by not autocorrelation
- `kernel` - Allows for heteroskedasticity and autocorrelation
- `cluster` - Allows for one-way cluster dependence"
- As we have heteroskedasticity and autocorrelation, we use the **`kernel`** option ==>
- Kernel (HAC)
- Kernel: bartlett
```
# Two-step efficient GMM with the same controls/endogenous/instruments as 2SLS.
# NOTE(review): cov_type='robust' is used here although the notes above argue
# for the 'kernel' (HAC) option; the kernel variant is fitted later — confirm.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
ivgmmmodel = IVGMM(panel_df['log_passengers'],
                   panel_df[controls + dum_period[:-2]],
                   panel_df[['log_fare','log_passengers_lag1']],
                   panel_df[instruements]).fit(cov_type='robust')
print(ivgmmmodel.summary)
ivgmmmodel.j_stat  # J statistic for the overidentifying restrictions
```
## Testing Autocorrelation
The regression residuals are not autocorrelated ? See [reference](https://www.statsmodels.org/stable/diagnostic.html)
```
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.stats.diagnostic import acorr_breusch_godfrey
```
### 1. Ljung-Box test for no autocorrelation
```
# Ljung-Box test on the GMM residuals (H0: no autocorrelation).
ljungbox_test = acorr_ljungbox(ivgmmmodel.resids.values)
ljungbox_test
```
### 2. Breusch Godfrey test for no autocorrelation of residuals
```
from statsmodels.tsa.tsatools import lagmat
from statsmodels.regression.linear_model import OLS
from scipy import stats
# Labels for the 4-tuple returned by the Breusch-Godfrey helpers below.
name = ['Lagrange multiplier statistic:','Lagrange multiplier P-value:','f_statistic for F test:','P-value for F test:']
```
### 2.1. Breusch Godfrey test using GMM results:
- The following function return the Breusch Godfrey test. For more details refere to the red lines
```
def breusch_godfrey_lm(results, nlags=None, store=False):
    """
    Breusch-Godfrey Lagrange Multiplier test for residual autocorrelation.

    Parameters
    ----------
    results : result instance
        Estimation results whose residuals are tested for serial correlation.
        Must expose ``results.resids`` and ``results.model.exog.original``
        (linearmodels-style results object).
    nlags : int, optional
        Number of residual lags in the auxiliary regression (highest lag).
        Defaults to Schwert's (1989) rule: trunc(12 * (nobs/100) ** 0.25).
    store : bool, optional
        If True, additionally return a container of intermediate results.

    Returns
    -------
    lm : float
        Lagrange multiplier statistic (nobs * R^2 of the auxiliary regression).
    lmpval : float
        p-value of the LM statistic (Chi-square with ``nlags`` df).
    fval : float
        F statistic for the equivalent joint parameter restriction.
    fpval : float
        p-value of the F statistic.
    resstore : object, optional
        Intermediate results (``resols``, ``usedlag``); only when store=True.

    Notes
    -----
    BG adds lags of the residual to the original exog in an auxiliary
    regression with the residuals as endog; see Greene (5th ed.) 12.7.1 and
    statsmodels' ``acorr_breusch_godfrey``.
    """
    x = np.asarray(results.resids)
    exog_old = results.model.exog.original
    nobs = x.shape[0]
    if nlags is None:
        # Default lag length from Greene, referencing Schwert (1989).
        nlags = np.trunc(12. * np.power(nobs/100., 1/4.))
    nlags = int(nlags)
    # Pad with leading zeros so every observation keeps a full set of lags.
    x = np.concatenate((np.zeros(nlags), x))
    xdall = lagmat(x[:, None], nlags, trim='both')
    nobs = xdall.shape[0]
    xdall = np.c_[np.ones((nobs, 1)), xdall]
    xshort = x[-nobs:]
    # Auxiliary regressors: original exog plus constant and residual lags.
    exog = np.column_stack((exog_old, xdall))
    k_vars = exog.shape[1]
    if store:
        # FIX: the original referenced an undefined name ``ResultsStore`` and
        # raised NameError whenever store=True; use a minimal local container.
        class _ResultsStore:
            pass
        resstore = _ResultsStore()
    resols = OLS(xshort, exog).fit()
    # F test that the coefficients on the residual lags are jointly zero.
    ft = resols.f_test(np.eye(nlags, k_vars, k_vars - nlags))
    fval = np.squeeze(ft.fvalue)[()]
    fpval = np.squeeze(ft.pvalue)[()]
    lm = nobs * resols.rsquared
    # Degrees of freedom for the LM test = number of residual lags used.
    lmpval = stats.chi2.sf(lm, nlags)
    if store:
        resstore.resols = resols
        resstore.usedlag = nlags
        return lm, lmpval, fval, fpval, resstore
    else:
        return lm, lmpval, fval, fpval
# Run the Breusch-Godfrey test on the GMM residuals and label the 4 statistics.
breusch_godfrey_test_gmm = breusch_godfrey_lm(ivgmmmodel)
print(pd.Series(breusch_godfrey_test_gmm,
                index=name))
```
### 2.2. Breusch-Godfrey test using OLS results
- We use the statsmodels implementation (`acorr_breusch_godfrey`) for this step
```
# Breusch-Godfrey test on an OLS fit (statsmodels' acorr_breusch_godfrey
# requires a statsmodels results instance, not a linearmodels one).
olsmodel = sm.OLS(panel_df.log_passengers,
                  panel_df[['log_passengers_lag1','pass_lag_resids']+controls+dum_period[:-2]]
                  ).fit()
breusch_godfrey_test_ols = acorr_breusch_godfrey(olsmodel)
print(pd.Series(breusch_godfrey_test_ols,
                index=name))
```
# GMM with `Kernel` cov_type option
- The Breusch-Godfrey tests run on the two results (`GMM` and `OLS`) show that we have autocorrelation.
- Hence, we have to run the `GMM` taking into account this `autocorrelation` and the `heteroskedasticity` that was already established by the `White` and `Breusch-Pagan` tests.
- Consequently, the `kernel` `cov_type` option will be used:
- Kernel (HAC)
- Kernel: bartlett
```
# Refit GMM with the HAC 'kernel' covariance (Bartlett) to account for the
# heteroskedasticity and autocorrelation detected above.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
ivgmmmodel = IVGMM(panel_df['log_passengers'],
                   panel_df[controls + dum_period[:-2]],
                   panel_df[['log_fare','log_passengers_lag1']],
                   panel_df[instruements]).fit(cov_type='kernel')
print(ivgmmmodel.summary)
# Predicted vs. true scatter; points on the red 45-degree line are perfect fits.
fig, ax = plt.subplots(figsize=(12,8))
a = plt.axes(aspect='equal')
plt.scatter(panel_df.log_passengers.values, ivgmmmodel.predict().values, alpha=.01, c='b')
plt.title("GMM: Predicted vs True value", fontsize=27)
plt.xlabel('True Values [log_passengers]', fontsize=20)
plt.ylabel('GMM Predictions [log_passengers]', fontsize=20)
lims = [panel_df.log_passengers.min(), panel_df.log_passengers.max()]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims, c='r')
# Residual distribution (histogram plus two KDE overlays).
fig, ax = plt.subplots(figsize=(12, 8))
sns.distplot(ivgmmmodel.resids, bins=200, hist=True, kde='gaussian', color='b', ax=ax, norm_hist=True)
sns.distplot(ivgmmmodel.resids, bins=200, hist=False, kde='kernel', color='r', ax=ax, norm_hist=True)
ax.set_title("GMM Residuals Plot", fontsize=27)
ax.set_xlim(-1.5,1.5)
ax.set_xlabel('GMM Residuals', fontsize=20)
# FIX: plt.show is a function object; without parentheses it was never called.
plt.show()
```
### 3. Exogeneity Testing using GMM model
The J statistic tests whether the moment conditions are sufficiently close to zero to indicate that the model is not overidentified.
The statistic is defined as $\bar{g}'W^{-1}\bar{g} \sim \chi^2_q$
```
ivgmmmodel.j_stat
```
### 4. Exogeneity test using the augmented regression approach
Estimating the variances of u(i), assuming that sigma2(ui)=exp(a0+a1*log_fare+a2*log_nsmiles)
- Regress using OLS the `log square of the IV residuals`
- Compute `inverse of sigma` using the the square root of the exponentiel of the fitted values
### 4.1. Use the `IV2SL` `residuals` as reponse variable in the `OLS` model and compute the `inverse of the sigma`
```
# Model the error variance multiplicatively: regress the log squared 2SLS
# residuals on the endogenous regressors, then build 1/sigma_i weights from the
# fitted values for the FGLS step below.
# NOTE(review): the text above mentions log_nsmiles in the variance equation,
# but the regression below uses log_passengers_lag1 — confirm which is intended.
controls = ['const','log_nsmiles','log_income_capita','log_population','nb_airline',
            'log_kjf_price','dum_dist']
instruements = ['log_nsmiles_dif1','log_income_capita_dif1','log_population_dif1','nb_airline_dif1',
                'log_fare_dif1','log_fare_dif2','log_passengers_dif2',
                'log_nsmiles_dif2','log_income_capita_dif2','log_population_dif2','nb_airline_dif2']
iv2LSmodel = IV2SLS(panel_df['log_passengers'],
                    panel_df[controls+dum_period[:-2]],
                    panel_df[['log_fare','log_passengers_lag1']], panel_df[instruements]).fit()
panel_df['log_iv_residus2'] = np.log(iv2LSmodel.resids**2)
r2_aug = PanelOLS(panel_df.log_iv_residus2, panel_df[['log_fare','log_passengers_lag1']]).fit(cov_type='robust')
# computes 1/sigma, used later as weight for correcting for heteroskedasticity
sigma_inverse = 1/(np.exp(r2_aug.predict())**.5)  # equivalent to 1/np.sqrt(np.exp(fitted))
```
### 4.2. Feasible Generalized Least Squares (GLS)
GLS on the augmented regression = Exogeneity test for "log_fare" allowing for heteroskedasticity
```
# Feasible GLS on the augmented regression.
# NOTE(review): sm.GLS's `sigma` argument expects the error (co)variance, but a
# 1/sigma weight vector is passed here — confirm this is intended (WLS with
# weights = 1/sigma^2 may have been the goal).
glsmodel = sm.GLS(panel_df['log_passengers'], panel_df[controls+['log_fare','log_passengers_lag1']+dum_period[:-2]], sigma=sigma_inverse).fit()
print(glsmodel.summary())
glsmodel.params[:9]  # coefficients of the non-dummy regressors
glsmodel.bse[:9]     # and their standard errors
```
### 4.3. GLSAR model
We can use the GLSAR model with one lag to get a similar result:
```
# GLSAR: GLS with AR(1) errors, one iteration of the iterative fit.
glsarmodel = sm.GLSAR(panel_df['log_passengers'], panel_df[controls+['log_fare','log_passengers_lag1']+dum_period[:-2]], 1)
glsarresults = glsarmodel.iterative_fit(1)
print(glsarresults.summary())
glsarresults.params[:9]  # coefficients of the non-dummy regressors
glsarresults.bse[:9]     # and their standard errors
# Predicted vs. true scatter for the (plain) GLS model.
# NOTE(review): the plots use `glsmodel`, not the GLSAR results — confirm intent.
fig, ax = plt.subplots(figsize=(12,8))
ax = plt.axes(aspect='equal')
ax.scatter(panel_df.log_passengers.values, glsmodel.predict(), alpha=.01, c='b')
plt.title("GLS: Predicted vs True value", fontsize=27)
plt.xlabel('True Values [log_passengers]', fontsize=20)
plt.ylabel('GLS Predictions [log_passengers]', fontsize=20)
lims = [panel_df.log_passengers.min(), panel_df.log_passengers.max()]
ax.set_xlim(lims)
ax.set_ylim(lims)
_ = ax.plot(lims, lims, c='r')
fig, ax = plt.subplots(figsize=(12, 8))
sns.distplot(glsmodel.resid, bins=200, hist=True, kde='gaussian', color='b', ax=ax, norm_hist=True)
sns.distplot(glsmodel.resid, bins=200, hist=False, kde='kernel', color='r', ax=ax, norm_hist=True)
ax.set_title("GLS Residuals Plot", fontsize=27)
ax.set_xlim(-1.5,1.5)
ax.set_xlabel('GLS Residuals', fontsize=20)
# FIX: plt.show is a function object; without parentheses it was never called.
plt.show()
```
> GLS is the model that takes autocorrelated residuals into account, [source](https://stats.stackexchange.com/questions/254505/autocorrelation-and-gls)
## References
StatsModels – Regression Diagnostics and Specification: https://www.statsmodels.org/stable/diagnostic.html
Linearmodel 4.14 – Examples: https://bashtage.github.io/linearmodels/devel/panel/examples/examples.html
Linearmodel 4.5 – Examples: https://bashtage.github.io/linearmodels/doc/panel/examples/examples.html?highlight=white
Linearmodel 4.5 – Linear Instrumental-Variables Regression:https://bashtage.github.io/linearmodels/doc/iv/examples/advanced-examples.html
PDF – Heteroskedasticity and Autocorrelation: http://www.homepages.ucl.ac.uk/~uctpsc0/Teaching/GR03/Heter&Autocorr.pdf
PDF – (Orleans) Linear Panel Models and Heterogeneity: https://www.univ-orleans.fr/deg/masters/ESA/CH/Geneve_Chapitre1.pdf
PDF - Panel Data Models with Heterogeneity and Endogeneity https://www.ifs.org.uk/docs/wooldridge%20session%204.pdf
PDF – Instrumental Variables Estimation: http://www3.grips.ac.jp/~yamanota/Lecture%20Note%208%20to%2010%202SLS%20&%20others.pdf
Generalized Least Squares: https://www.statsmodels.org/dev/examples/notebooks/generated/gls.html
Endogenous Variables and IV Regression in Python: https://bfdykstra.github.io/2016/11/17/Endogeneity-and-Instrumental-Variable-Regression.html?fbclid=IwAR2yWXJKHUzcvqhhdX_yo4l5bn0uEa9CK09T5j9XmhCQxPKC_IIXJPdm45s
PDF – P.3 Economics 241B Estimation with Instruments: http://econ.ucsb.edu/~doug/241b/Lectures/16%20Estimation%20with%20Instruments.pdf
PDF: HOW TO TEST ENDOGENEITY OR EXOGENEITY: AN E-LEARNING HANDS ON SAS: http://www.kiran.nic.in/pdf/Social_Science/e-learning/How_to_Test_Endogeneity_or_Exogeneity_using_SAS-1.pdf
| github_jupyter |
```
# --- Bimbo demand forecasting: load the engineered train/test feature sets ---
import os
# Prepend MinGW to PATH so the compiled xgboost DLL loads on Windows.
mingw_path = 'C:\\Users\\a1\\mingw\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
train = pd.read_csv('new_train_mean_cl.csv')
test = pd.read_csv('new_test_mean_cl.csv')
# Drop the index column that the CSV round-trip produced.
del test['Unnamed: 0']
del train['Unnamed: 0']
# De-duplicate test rows by id and reorder columns to match the training features.
test = test.drop_duplicates(subset='id').set_index(keys='id').sort_index()
test = test[[u'Semana', u'Producto_ID', u'Cliente_ID', u'lag1', u'lag2', u'lag3', u'Agencia_ID', u'Canal_ID', u'Ruta_SAK',
             u'Cliente_ID_town_count', u'price', u'weight', u'pieces', u'cluster_nombre', u'drink', u'w_per_piece', u'OXXO',
             u'ARTELI', u'ALSUPER', u'BODEGA', u'CALIMAX', u'XICANS', u'ABARROTES', u'CARNICERIA', u'FRUTERIA',
             u'DISTRIBUIDORA', u'ELEVEN', u'HOTEL', u'HOSPITAL', u'CAFE', u'FARMACIA', u'CREME', u'SUPER', u'COMOD',
             u'MODELOR', u'UNKN']]
def RMSLE_score(pred, true):
    """Return sqrt(mean((pred - true)^2)).

    The inputs here are log1p-transformed demand values, so this RMSE on
    logs is the competition's RMSLE metric.
    """
    squared_errors = (pred - true) ** 2
    return np.sqrt(np.mean(squared_errors))
# --- Train per-week XGBoost models and blend submissions ---
# NOTE(review): this is a Python 2 cell (`print` statement below); it will not
# run under Python 3, and `cross_validation`/`grid_search` were removed from
# modern scikit-learn.
from sklearn import cross_validation
from sklearn.preprocessing import LabelEncoder
from xgboost.sklearn import XGBRegressor
from sklearn import grid_search
X = train
y = train['Demanda_uni_equil_log0'].copy()
# Remove the target from the features (X aliases train, so this mutates both).
del train['Demanda_uni_equil_log0']
# Drop columns found unhelpful, from both train and test.
del X['drink']
del X['DISTRIBUIDORA']
del X['ARTELI']
del X['CALIMAX']
del X['MODELOR']
del X['HOSPITAL']
del X['HOTEL']
del test['drink']
del test['DISTRIBUIDORA']
del test['ARTELI']
del test['CALIMAX']
del test['MODELOR']
del test['HOSPITAL']
del test['HOTEL']
mean_submission = pd.read_csv('submit_mean.csv').set_index(keys='id').sort_index()
# For each held-out week w: train on week w, validate on the remaining weeks.
for w in [6, 7]:
    train_index = (train['Semana'] == w)
    test_index = ~(train['Semana'] == w)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    xgbr = XGBRegressor(colsample_bytree=0.8,learning_rate=0.05, max_depth=15, n_estimators=100, reg_lambda=0.01, subsample=0.8)
    xgbr.fit(X_train, y_train)
    preds = xgbr.predict(X_test)
    preds[preds<0] = 0  # demand cannot be negative
    print RMSLE_score(preds, y_test)
    subms = xgbr.predict(test)
    # Predictions were made on log1p(demand); invert with expm1.
    mean_submission['xgb_demanda'+str(w)] = np.expm1(subms)
    mean_submission.to_csv('subm_xgb_mean6.csv')
subms = xgbr.predict(test)
pd.Series(np.expm1(subms)).to_csv('subm_xgb.csv')
mean_submission = pd.read_csv('submit_mean.csv').set_index(keys='id').sort_index()
mean_submission['xgb_demanda'] = np.expm1(subms)
# NOTE(review): xgb_demanda8/xgb_demanda9 are not created in this cell; presumably
# they come from an earlier run persisted in submit_mean.csv -- verify before rerunning.
mean_submission['subm'] = 0.3*mean_submission['xgb_demanda6']+0.7*(0.3*mean_submission['xgb_demanda6']+0.3*mean_submission['xgb_demanda7']+
                                                                   0.2*mean_submission['xgb_demanda8']+0.2*mean_submission['xgb_demanda9'])
mean_submission['subm'].to_csv('subm_xgb.csv')
mean_submission
test.head()
```
| github_jupyter |
# Access and mosaic Planet NICFI monthly basemaps
> A guide for accessing monthly Planet NICFI basemaps, selecting data by a defined AOI and mosaicing to produce a single image.
You will need a configuration file named `planet_api.cfg` (simple text file with `.cfg` extension will do) to run this notebook. It should be located in your `My Drive` folder.
The contents of the file should reflect the template below, swapping in the API access key that you should have received once you signed up for and subscribed to the Planet NICFI program. Please visit https://www.planet.com/nicfi/ to sign up if you have not already.
```
[credentials]
api_key = xxxxxxxxxxxxxxxxx
```
## Setup Notebook
```{admonition} **Version control**
Colab updates without warning to users, which can cause notebooks to break. Therefore, we are pinning library versions.
```
```
!pip install -q rasterio==1.2.10
!pip install -q geopandas==0.10.2
!pip install -q shapely==1.8.0
!pip install -q radiant_mlhub # for dataset access, see: https://mlhub.earth/
# import required libraries
import os, glob, functools, fnmatch, requests, io, shutil, tarfile, json
from pathlib import Path
from zipfile import ZipFile
from itertools import product
from configparser import ConfigParser
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['axes.grid'] = False
mpl.rcParams['figure.figsize'] = (12,12)
import rasterio
from rasterio.merge import merge
from rasterio.plot import show
import geopandas as gpd
from folium import Map, GeoJson, Figure
from shapely.geometry import box
from IPython.display import clear_output
from radiant_mlhub import Dataset, client, get_session, Collection
# configure Radiant Earth MLHub access
!mlhub configure
# set your root directory and tiled data folders
# Detect Colab vs. a local Jupyter server: on Colab, mount Drive and create the
# working directories there; locally, use relative paths.
if 'google.colab' in str(get_ipython()):
    # mount google drive
    from google.colab import drive
    drive.mount('/content/gdrive')
    root_dir = '/content/gdrive/My Drive/tf-eo-devseed/'
    workshop_dir = '/content/gdrive/My Drive/tf-eo-devseed-workshop'
    dirs = [root_dir, workshop_dir]
    for d in dirs:
        if not os.path.exists(d):
            os.makedirs(d)
    print('Running on Colab')
else:
    root_dir = os.path.abspath("./data/tf-eo-devseed")
    workshop_dir = os.path.abspath('./tf-eo-devseed-workshop')
    print(f'Not running on Colab, data needs to be downloaded locally at {os.path.abspath(root_dir)}')
# Go to root folder
%cd $root_dir
```
```{admonition} **GCS note!**
We won't be using Google Cloud Storage to download data, but here is a code snippet to show how to practically do so with a placeholder "aoi" vector file. This code works if you have access to a project on GCP.
```
```python
# Example: mirror a GCS prefix into a local folder (illustrative snippet; not
# executed in this notebook).
#authenticate Google Cloud Storage
from google.colab import auth
auth.authenticate_user()
print("Authenticated Google Gloud access.")
# Imports the Google Cloud client library
from google.cloud import storage
# Instantiates a client
project = 'tf-eo-training-project'
storage_client = storage.Client(project=project)
# The name for the new bucket
bucket_name = "dev-seed-workshop"
data_dir = os.path.join(workshop_dir,'data/')
gcs_to_local_dir = os.path.join(data_dir,'gcs/')
prefix = 'data/'
local_dir = os.path.join(gcs_to_local_dir, prefix)
dirs = [data_dir, gcs_to_local_dir, local_dir]
for dir in dirs:
    if not os.path.exists(dir):
        os.makedirs(dir)
bucket_name = "dev-seed-workshop"
bucket = storage_client.get_bucket(bucket_name)
blobs = bucket.list_blobs(prefix=prefix) # Get list of files
for blob in blobs:
    print(blob)
    filename = blob.name.replace('/', '_')
    filename_split = os.path.splitext(filename)
    filename_zero, fileext = filename_split
    basename = os.path.basename(filename_zero)
    # NOTE(review): `filename` is overwritten with 'aoi' but never used afterwards
    # (the download path is built from `basename` + `fileext`); looks like a leftover.
    filename = 'aoi'
    blob.download_to_filename(os.path.join(local_dir, "%s%s" % (basename, fileext))) # Download
    print(blob, "%s%s" % (basename, fileext))
```
### Get search parameters
- Read the AOI from a [Radiant Earth MLHub dataset](https://mlhub.earth/data/ref_african_crops_kenya_01) that overlaps with NICFI coverage into a Geopandas dataframe.
- Get AOI bounds and centroid.
- Authenticate with Planet NICFI API key.
- Choose mosaic based on month/year of interest.
```
collections = [
'ref_african_crops_kenya_01_labels'
]
def download(collection_id):
    """Download a Radiant MLHub collection archive, extract it into the CWD, then delete the archive."""
    print(f'Downloading {collection_id}...')
    collection = Collection.fetch(collection_id)
    path = collection.download('.')
    # Context manager guarantees the archive handle is closed even if extraction
    # fails (the original leaked the open tarfile on error).
    with tarfile.open(path, "r:gz") as tar:
        # NOTE(review): extractall() trusts archive member paths; acceptable for
        # MLHub-published data, unsafe for untrusted archives.
        tar.extractall()
    os.remove(path)
def resolve_path(base, path):
    """Join *path* onto *base* and return the fully resolved absolute Path."""
    joined = os.path.join(base, path)
    return Path(joined).resolve()
def load_df(collection_id):
    """Index every asset of a downloaded MLHub collection into a DataFrame.

    Walks collection.json, then each item and its 'source' links, collecting
    one row per asset: [tile_id, datetime, satellite_platform, asset, file_path].
    Label assets have no source item, so their datetime/platform are None.
    """
    import pandas as pd  # bug fix: pandas is never imported in this notebook's setup cell

    with open(f'{collection_id}/collection.json', 'r') as f:
        collection = json.load(f)
    rows = []
    item_links = []
    for link in collection['links']:
        if link['rel'] != 'item':
            continue
        item_links.append(link['href'])
    for item_link in item_links:
        item_path = f'{collection_id}/{item_link}'
        current_path = os.path.dirname(item_path)
        with open(item_path, 'r') as f:
            item = json.load(f)
        tile_id = item['id'].split('_')[-1]
        # Label assets: no acquisition time or platform.
        for asset_key, asset in item['assets'].items():
            rows.append([
                tile_id,
                None,
                None,
                asset_key,
                str(resolve_path(current_path, asset['href']))
            ])
        # Source imagery assets: carry the acquisition datetime and platform.
        for link in item['links']:
            if link['rel'] != 'source':
                continue
            link_path = resolve_path(current_path, link['href'])
            source_path = os.path.dirname(link_path)
            try:
                with open(link_path, 'r') as f:
                    source_item = json.load(f)
            except FileNotFoundError:
                continue
            datetime = source_item['properties']['datetime']
            satellite_platform = source_item['collection'].split('_')[-1]
            for asset_key, asset in source_item['assets'].items():
                rows.append([
                    tile_id,
                    datetime,
                    satellite_platform,
                    asset_key,
                    str(resolve_path(source_path, asset['href']))
                ])
    return pd.DataFrame(rows, columns=['tile_id', 'datetime', 'satellite_platform', 'asset', 'file_path'])
# Download every configured collection, then derive the AOI and authenticate.
for c in collections:
    download(c)
# Load the shapefile into a geopandas dataframe (for more info see: https://geopandas.org/en/stable/)
gdf = gpd.read_file(os.path.join(root_dir, 'ref_african_crops_kenya_01_labels/ref_african_crops_kenya_01_labels_00/labels.geojson'))
gdf = gdf.to_crs("EPSG:4326")  # reproject to WGS84 lon/lat for the Planet API and folium
# Get AOI bounds
bbox_aoi = gdf.geometry.total_bounds
# Get AOI centroid for plotting with folium
centroid_aoi = [box(*bbox_aoi).centroid.x, box(*bbox_aoi).centroid.y]
# authenticate with Planet NICFI API KEY
config = ConfigParser()
configFilePath = '/content/gdrive/My Drive/planet_api.cfg'
with open(configFilePath) as f:
    config.read_file(f)
API_KEY = config.get('credentials', 'api_key')
PLANET_API_KEY = API_KEY # <= insert API key here
#setup Planet base URL
API_URL = "https://api.planet.com/basemaps/v1/mosaics"
#setup session
session = requests.Session()
# Planet uses HTTP basic auth: API key as username, empty password.
session.auth = (PLANET_API_KEY, "") #<= change to match variable for API Key if needed
```
```{important}
In the following cell, the **name__is** parameter is the basemap name. It is only differentiable by the time range in the name.
E.g. `planet_medres_normalized_analytic_2021-06_mosaic` is for June, 2021.
```
```
#set params for search using name of mosaic
parameters = {
    "name__is" :"planet_medres_normalized_analytic_2021-06_mosaic" # <= customized to month/year of interest
}
#make get request to access mosaic from basemaps API
res = session.get(API_URL, params = parameters)
#response status code (200 = OK)
print(res.status_code)
#print metadata for mosaic
mosaic = res.json()
#print("mosaic metadata (this will expose your API key so be careful about if/where you uncomment this line): ", json.dumps(mosaic, indent=2))
#get id of the first (and, for an exact name match, only) mosaic returned
mosaic_id = mosaic['mosaics'][0]['id']
#get bbox for entire mosaic
mosaic_bbox = mosaic['mosaics'][0]['bbox']
print("mosaic_bbox: ", mosaic_bbox)
print("bbox_aoi: ", bbox_aoi)
#converting bbox to string for search params (comma-separated "minx,miny,maxx,maxy")
string_bbox = ','.join(map(str, bbox_aoi))
print('Mosaic id: ', mosaic_id)
```
#### Plot the gridded AOI.
```
# Build a folium map centered on the AOI centroid and overlay the gridded AOI.
m = Map(tiles="Stamen Terrain",
        control_scale=True,
        location = [centroid_aoi[1], centroid_aoi[0]],  # folium expects (lat, lon)
        zoom_start = 10,
        max_zoom = 20,
        min_zoom =6,
        width = '100%',
        height = '100%',
        zoom_control=False )
GeoJson(gdf).add_to(m)
Figure(width=500, height=300).add_child(m)  # render the map at a fixed size
```
### Request the quad tiles fitting the search parameters
```
#search for mosaic quad using AOI
search_parameters = {
    'bbox': string_bbox,
    'minimal': True
}
#accessing quads using metadata from mosaic
quads_url = "{}/{}/quads".format(API_URL, mosaic_id)
res = session.get(quads_url, params=search_parameters, stream=True)
print(res.status_code)
# Parse the response once (the original called res.json() twice back to back).
quads = res.json()
items = quads['items']
#printing an example of quad metadata
#print("quad tiles metadata (this will expose your API key so be careful about if/where you uncomment this line): ", json.dumps(items[0], indent=2))
```
#### Plot the requested quad tiles.
```
# Draw each returned quad footprint on the existing folium map.
for item in items:  # the index from zip(items, range(len(items))) was unused
    quad_box = item["bbox"]
    GeoJson(box(*quad_box)).add_to(m)
Figure(width=500, height=300).add_child(m)
# Set directory for downloading the quad tiles to
nicfi_dir = os.path.join(root_dir, '062021_basemap_nicfi_aoi/')
quads_dir = os.path.join(nicfi_dir, 'quads/')
for d in (nicfi_dir, quads_dir):  # avoid shadowing the builtin `dir`
    os.makedirs(d, exist_ok=True)  # exist_ok replaces the racy exists()+makedirs pair
# Iterate over the quad download links, saving each tile to the quads folder by id.
for item in items:
    link = item['_links']['download']
    filename = os.path.join(quads_dir, item['id'] + '.tiff')
    # Skip tiles already downloaded in a previous run.
    if not os.path.isfile(filename):
        urllib.request.urlretrieve(link, filename)
```
### Mosaic the quad tiles
```
# File and folder paths
out_mosaic = os.path.join(nicfi_dir, '062021_basemap_nicfi_aoi_Mosaic.tif')
# Make a search criteria to select the quad tile files
search_criteria = "*.tiff"
q = os.path.join(nicfi_dir, 'quads', search_criteria)
print(q)
# Get all of the quad tiles
quad_files = glob.glob(q)
quad_files
# Open every tile and merge them into a single array + affine transform.
src_files_to_mosaic = [rasterio.open(f) for f in quad_files]
mosaic, out_trans = merge(src_files_to_mosaic)
# Base the output metadata on one of the inputs (all quads share a profile),
# then close the datasets -- the original left every file handle open.
out_meta = src_files_to_mosaic[-1].meta.copy()
for src in src_files_to_mosaic:
    src.close()
out_meta.update({"driver": "GTiff",
                 "height": mosaic.shape[1],
                 "width": mosaic.shape[2],
                 "transform": out_trans
                 }
                )
# Write the mosaic to raster file
with rasterio.open(out_mosaic, "w", **out_meta) as dest:
    dest.write(mosaic)
# Write true color (RGB).
rgb_out_mosaic = os.path.join(nicfi_dir, '062021_basemap_nicfi_aoi_rgb_Mosaic.tif')
out_meta.update({"count": 3})
print(out_meta)
# Assumes the mosaic band order is B,G,R,... so RGB = bands 2,1,0 -- TODO confirm
# against the NICFI product docs for the requested basemap.
rgb = np.stack([mosaic[2], mosaic[1], mosaic[0]])  # (3, H, W); same result as dstack + transpose(2,0,1)
with rasterio.open(rgb_out_mosaic, "w", **out_meta) as dest:
    dest.write(rgb)
```
#### Plot the mosaic
```
# Display the RGB mosaic.
# NOTE(review): this dataset handle is never closed; acceptable in a notebook session.
src = rasterio.open(rgb_out_mosaic)
show(src)
```
| github_jupyter |
## Preprocessing
```
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
# Import and read the charity_data.csv.
import pandas as pd
df = pd.read_csv("../Resources/charity_data.csv")
df.head()
# df.describe()
# df['AFFILIATION'].nunique()
df.shape
# importing sweetviz (third-party automated EDA report generator)
import sweetviz as sv
#analyzing the dataset
charity_report = sv.analyze(df)
#display the report (writes a standalone HTML file)
charity_report.show_html('Charity.html')
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
# (positional `axis` arguments to DataFrame.drop were deprecated and removed in
# pandas 2.0; use the explicit keyword form)
df = df.drop(columns=["EIN", "NAME"])
# Determine the number of unique values in each column.
df.nunique()
# Look at APPLICATION_TYPE value counts for binning
df['APPLICATION_TYPE'].value_counts()
# Choose a cutoff value and create a list of application types to be replaced
# use the variable name `application_types_to_replace`
application_types_to_replace = ["T9","T13","T12","T2","T25","T14","T29","T15","T17"]
# Replace all rare types in one vectorized call instead of a Python loop.
df['APPLICATION_TYPE'] = df['APPLICATION_TYPE'].replace(application_types_to_replace, "Other")
# Check to make sure binning was successful
df['APPLICATION_TYPE'].value_counts()
# Look at CLASSIFICATION value counts for binning
class_df = df['CLASSIFICATION'].value_counts()
# Bin every classification seen fewer than 1000 times into "Other".
classifications_to_replace = class_df[(class_df < 1000)].index
df['CLASSIFICATION'] = df['CLASSIFICATION'].replace(list(classifications_to_replace), "Other")
# Check to make sure binning was successful
df['CLASSIFICATION'].value_counts()
import numpy as np
# Log-transform the heavily skewed ask amount.
df['ASK_AMT'] = np.log10(df['ASK_AMT'])
df.head()
# Convert categorical data to numeric with `pd.get_dummies`
df = pd.get_dummies(df, drop_first=True)
list(df)
# Split our preprocessed data into our features and target arrays
X = df.drop('IS_SUCCESSFUL', axis=1)
y = df['IS_SUCCESSFUL']
# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=78)
# Create a StandardScaler instance, fit on the training split only, then scale both.
scaler = StandardScaler()
X_scaler = scaler.fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
inputs = len(X.columns)  # number of input features for the network
```
## Compile, Train and Evaluate the Model
```
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
nn = tf.keras.models.Sequential()
# First hidden layer (input_dim = number of scaled features)
nn.add(tf.keras.layers.Dense(units=9, activation="relu", input_dim=inputs))
# Second hidden layer
nn.add(tf.keras.layers.Dense(units=9, activation="relu"))
# Output layer: single sigmoid unit for binary classification
nn.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Check the structure of the model
nn.summary()
# Compile the model
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# NOTE(review): this import is unused and mixes standalone keras with tf.keras.
from keras.callbacks import Callback
# Checkpoint weights every 5 epochs. NOTE(review): `period` is deprecated in newer
# TF in favor of `save_freq`; confirm against the pinned TF version before changing.
mc = tf.keras.callbacks.ModelCheckpoint('weights{epoch:08d}.h5',save_weights_only=True, period=5)
# Train the model
fit_model = nn.fit(X_train_scaled, y_train, callbacks=[mc], epochs=10)
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# Export our model to HDF5 file
nn.save("AlphabetSoupCharity.h5")
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Array/spectral_unmixing.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Array/spectral_unmixing.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Array/spectral_unmixing.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Array/spectral_unmixing.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess
try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
    import google.colab
    import geemap.eefolium as emap
except ImportError:
    # bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only an import failure means we are not on Colab.
    import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
    ee.Initialize()
except Exception:
    # Not yet authenticated on this machine: run the interactive auth flow once.
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
# Create the interactive map centered on the continental US.
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Spectral unmixing: decompose each Landsat 5 pixel into urban/veg/water
# endmember fractions via the pseudo-inverse of the endmember matrix.
# Add Earth Engine dataset
# Array-based spectral unmixing.
# Create a mosaic of Landsat 5 images from June through September, 2007.
allBandMosaic = ee.ImageCollection('LANDSAT/LT05/C01/T1') \
  .filterDate('2007-06-01', '2007-09-30') \
  .select('B[0-7]') \
  .median()
# Create some representative endmembers computed previously by sampling
# the Landsat 5 mosaic.
urbanEndmember = [88, 42, 48, 38, 86, 115, 59]
vegEndmember = [50, 21, 20, 35, 50, 110, 23]
waterEndmember = [51, 20, 14, 9, 7, 116, 4]
# Compute the 3x7 pseudo inverse.
endmembers = ee.Array([urbanEndmember, vegEndmember, waterEndmember])
inverse = ee.Image(endmembers.matrixPseudoInverse().transpose())
# Convert the bands to a 2D 7x1 array. The toArray() call concatenates
# pixels from each band along the default axis 0 into a 1D vector per
# pixel, and the toArray(1) call concatenates each band (in this case
# just the one band of 1D vectors) along axis 1, forming a 2D array.
inputValues = allBandMosaic.toArray().toArray(1)
# Matrix multiply the pseudo inverse of the endmembers by the pixels to
# get a 3x1 set of endmembers fractions from 0 to 1.
unmixed = inverse.matrixMultiply(inputValues)
# Create and show a colored image of the endmember fractions. Since we know
# the result has size 3x1, project down to 1D vectors at each pixel (since the
# second axis is pointless now), and then flatten back to a regular scalar
# image.
colored = unmixed \
  .arrayProject([0]) \
  .arrayFlatten([['urban', 'veg', 'water']])
Map.setCenter(-98.4, 19, 11)
# Load a hillshade to use as a backdrop.
Map.addLayer(ee.Algorithms.Terrain(ee.Image('CGIAR/SRTM90_V4')).select('hillshade'))
Map.addLayer(colored, {'min': 0, 'max': 1},
             'Unmixed (red=urban, green=veg, blue=water)')
```
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map  # display the map with its data layers
```
| github_jupyter |
# 选择
## 布尔类型、数值和表达式

- 注意:比较运算符的相等是两个等号,一个等号代表赋值
- 在Python中可以用整型0来代表False,其他数字来代表True
- 后面还会讲到 is 在判断语句中的用法
```
# Demonstrate identity (`is`) vs equality (`==`).
a = id(1)
b = id(1)
print(a,b)
# because a and b are not the same object (each id() call may yield a distinct int)
a is b
a = id(1)
b = a
a is b
a = True
b = False
id(True)
a == b
a is b
```
## 字符串的比较使用ASCII值
```
# String comparison is lexicographic, by code point (ASCII here); equal strings
# make `a > b` False.
a = "jokar"
b = "jokar"
a > b
```
## Markdown
- https://github.com/younghz/Markdown
肯定会给发数据划分啊
$\sum_{j=1}^{N}x_{j}$
## EP:
- <img src="../Photo/34.png"></img>
- 输入一个数字,判断其实奇数还是偶数
## 产生随机数字
- 函数random.randint(a,b) 可以用来产生一个a和b之间且包括a和b的随机整数
```
import random
random.randint(0,1)  # random integer in [0, 1], both ends inclusive
# NOTE(review): the next two stanzas are illustrative pseudo-code for if/else and
# for-loops from the lecture; they are not runnable as written.
if condition:
    do someething
else:
    other
for iter_ in xxx:
    do something
age = 10
Joker = eval(input('Name'))  # NOTE(review): eval of user input is unsafe
print(Joker)
```
产生一个随机数,你去输入,如果你输入的数大于随机数,那么就告诉你太大了,反之,太小了,
然后你一直输入,知道它满意为止
```
# Guess-the-number game: up to 5 attempts against a random target in [0, 5];
# breaks out early on a correct guess.
number =random.randint(0,5)
for i in range(5):
    input_ = eval(input('>>'))
    if input_ > number:
        print('太大啦')
    if input_ < number:
        print('太小啦')
    if number == input_:
        print('正好')
        break
for i in range(5):
    print(i)
```
## 其他random方法
- random.random 返回0.0到1.0之间前闭后开区间的随机浮点
- random.randrange(a,b) 前闭后开
```
random.random()  # float in the half-open interval [0.0, 1.0)
import matplotlib.pyplot as plt
# NOTE(review): hard-coded local path; this only runs on the author's machine.
image=plt.imread('/Users/huwang/Downloads/cat.jpeg')
print(image*random.random())
plt.imshow(image)
```
## EP:
- 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字的和,并判定其是否正确
- 进阶:写一个随机序号点名程序
```
# Addition quiz with two random ints.
# NOTE(review): `while 1` has no break on success, so this loops forever.
number_1 = random.randrange(0,10)
number_2 = random.randrange(0,10)
while 1:
    sum_ = eval(input('>>'))
    if sum_ == (number_1 + number_2):
        print('Congratulations! Correct~')
    else:
        print('Sorry~SB.')
```
## if语句
- 如果条件正确就执行一个单向if语句,亦即当条件为真的时候才执行if内部的语句
- Python有很多选择语句:
> - 单向if
- 双向if-else
- 嵌套if
- 多向if-elif-else
- 注意:当语句含有子语句的时候,那么一定至少要有一个缩进,也就是说如果有儿子存在,那么一定要缩进
- 切记不可tab键和space混用,单用tab 或者 space
- 当你输出的结果是无论if是否为真时都需要显示时,语句应该与if对齐
```
# Single guess against `number` (defined in an earlier cell); the final print
# runs regardless of the outcome because it sits outside all the ifs.
input_ = eval(input('>>'))
if input_ > number:
    print('太大啦')
if input_ < number:
    print('太小啦')
if number == input_:
    print('正好')
print('不要灰心')
```
李文浩相亲测试树
年龄
老 年轻
拜拜
帅
否 是
考虑一下 老婆
没有 有
马上结婚 回家的诱惑
代码写不出来的立马分手,从此社会上有多出一个渣男/渣女.
```
# Nested-if "dating decision tree" from the lecture flowchart.
age = input('年轻嘛[y/n]')
if age == 'y':
    handsome = input('帅否[y/n]')
    if handsome == 'y':
        wife = input('有没有老婆[y/n]')
        if wife == 'y':
            print('回家的诱惑')
        else:
            print('立马结婚')
    else:
        print('考虑一下')
else:
    print('拜拜~')
```
## EP:
- 用户输入一个数字,判断其实奇数还是偶数
- 进阶:可以查看下4.5实例研究猜生日
## 双向if-else 语句
- 如果条件为真,那么走if内部语句,否则走else内部语句
## EP:
- 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字,并判定其是否正确,如果正确打印“you‘re correct”,否则打印正确错误
## 嵌套if 和多向if-elif-else

```
# Bug fix: thresholds must be tested from highest to lowest -- the original
# checked `score >= 80` first, so the `score >= 90` branch was unreachable and
# an A student was always graded 'B'.
if score >= 90:
    gread = 'A'
elif score >= 80:
    gread = 'B'
```
## EP:
- 提示用户输入一个年份,然后显示表示这一年的动物

- 计算身体质量指数的程序
- BMI = 以千克为单位的体重除以以米为单位的身高的平方

```
# BMI calculator: weight (kg) / height (m) squared, then bucket the result.
tizhong = eval(input('体重'))   # weight in kg
shengao = eval(input('身高'))   # height in m
BMI = tizhong / shengao ** 2
if BMI<18.5 :
    print('超清')  # NOTE(review): likely a typo for 偏瘦/过轻 (underweight)
elif 18.5<=BMI<25 :
    print('标准')
elif 25<=BMI<30 :
    print('超重')
else:
    print('超级无敌胖')
```
## 逻辑运算符



## EP:
- 判定闰年:一个年份如果能被4整除但不能被100整除,或者能被400整除,那么这个年份就是闰年
- 提示用户输入一个年份,并返回是否是闰年
- 提示用户输入一个数字,判断其是否为水仙花数
## 实例研究:彩票

```
# Lottery exercise: match a 2-digit guess against a random 10-99 draw, followed
# by scratch experiments and a narcissistic-number check.
import random
number = random.randint(10,99)
print(number)
N = input('>>')
number_shi = number // 10   # tens digit of the draw
number_ge = number % 10     # ones digit of the draw
# Keep a leading-zero guess like "05" from skewing the tens-digit math.
if N[0] == '0':
    N_shi = 0
else:
    N_shi = int(N) // 10
N_ge = int(N) % 10
if number == int(N):
    print('10000')  # exact match
# elif (number_shi == N_shi or number_shi==N_ge) and (number_ge == N_shi or number_ge==N_ge):
elif number_shi + number_ge == N_shi + N_ge:
    print('3000')   # digit sums match
elif (number_ge ==N_ge or number_ge == N_shi) or (number_shi == N_ge or number_shi == N_shi):
    print('1000')   # at least one digit matches
a = "05"
a[0]
# NOTE(review): `05 // 10` is a SyntaxError in Python 3 (leading-zero int
# literal); this line prevents the whole cell from running.
05 // 10
# Narcissistic (Armstrong) number check for a 3-digit input.
Number = eval(input('>>'))
bai = Number // 100
shi = Number // 10 % 10
ge = Number % 10
if bai**3 + shi **3 + ge **3 == Number:
    print('水仙花')
else:
    print('不是水仙花')
223 // 10
```
# Homework
- 1

```
# Solve a*x^2 + b*x + c = 0, branching on the discriminant.
a, b, c = map(float, input('Enter a, b, c:').split(','))
disc = b ** 2 - 4 * a * c  # hoisted: the original recomputed this four times
if disc > 0:
    r1 = (-b + disc ** 0.5) / (2 * a)
    r2 = (-b - disc ** 0.5) / (2 * a)
    print('The roots are%.6f and %.6f' % (r1, r2))
elif disc == 0:
    r1 = -b / (2 * a)  # sqrt(0) == 0, so the single root simplifies
    print('有一个根为%.1f' % r1)
else:
    # fixed message typo: "reeal" -> "real"
    print('The equation has no real roots')
```
- 2

```
# Homework 2: sum quiz over two random numbers in [0, 100).
import random
a = random.randrange(0,100)
b = random.randrange(0,100)
print(a)
print(b)
h = a + b   # expected answer
c = eval(input('请输入这两个数的和'))
if c == h:
    print('you are right')
else :
    print('you are pig')
```
- 3

```
# Homework 3: name today's weekday and the weekday `future` days from now.
# A lookup table (indexed by the exercise's 0-6 encoding, 0 = Sunday) replaces
# the original 14-branch if chain and fixes its 'Staurday' typo.
DAY_NAMES = ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday')
today = int(input('Enter today`s day: '))
future = int(input('Enter the nuber of days elapssed since today: '))
s = (today + future) % 7
# `% 7` also guards out-of-range input, which previously left the name unset (NameError).
xingqi = DAY_NAMES[today % 7]
xq = DAY_NAMES[s]
print('Today is %s and the future day is %s' % (xingqi, xq))
```
- 4

```
# Homework 4: print three integers in ascending order. sorted() also handles
# ties, which the original strict-inequality chain mis-ordered (e.g. for
# input 1,1,2 every comparison failed and the else printed "2 1 1").
a, b, c = eval(input('请输入三个整数: '))  # NOTE(review): eval of user input is unsafe; kept per the exercise
low, mid, high = sorted((a, b, c))
print(low, mid, high)
```
- 5

```
# Homework 5: compare the unit prices (price per unit weight) of two packages.
w1,p1 = eval(input('Enter weight and price for package 1: '))
w2,p2 = eval(input('Enter weight and price for package 2: '))
unit1 = p1 / w1
unit2 = p2 / w2
# fixed message typo: "thebetter" -> "the better"
if unit1 > unit2:
    print('Package 2 has the better price')
else:
    print('Package 1 has the better price')
```
- 6

```
# Homework 6: days in a month, with leap-year handling for February.
month,year=map(int,input('Enter month and year:').split(','))
day1=1,3,5,7,8,10,12   # 31-day months
day2=4,6,9,11          # 30-day months
if month in day1:
    print('%d月有31天'%month)
if month==2:
    # Leap year: divisible by 4 but not by 100, or divisible by 400.
    if (year % 4 ==0 and year % 100 !=0 ) or (year % 400 == 0):
        print('二月份有29天')
    else:
        print('二月份有28天')
if month in day2:
    print('%d月份有30天'%month)
```
- 7

```
# Homework 7: coin-flip guessing game -- compare the guess against a random side.
import numpy as np
guess = input('请输入你的猜测:')
a = np.random.choice(['正面', '反面'])
if guess == a:
    print('you are right!')
else:
    print('you are wrong~')  # fixed message typo: "worry" -> "wrong"
```
- 8

```
# Homework 8: rock-paper-scissors, moves encoded as 0/1/2.
import random
s = random.randrange(0,3)       # computer's move
r = int(input('石头剪刀布:'))    # user's move
if r == s:
    print("The computer is {}. You are {} too.It is a draw.".format(s,r))
elif (r == 0 and s == 1) or (r == 1 and s == 2) or (r == 2 and s == 0):
    print("The computer is {}. You are {}.You won.".format(s,r))
else:
    print("The computer is {}. You are {} too.You lose.".format(s,r))
```
- 9

```
# Homework 9: Zeller's congruence -- day of the week for a given date.
year = int(input('Enter year:(e.g.,2008): '))
month = int(input('Enter month: 1-12:'))
q = int(input('Enter the day of the month:1-31: '))
# Zeller treats Jan/Feb as months 13/14 of the previous year.
# NOTE(review): this compares the raw input against 13/14, so typing 1-12 never
# triggers the remap -- presumably the user is expected to enter 13/14; verify.
if month ==13 or month ==14:
    year=year-1
a=int((26*(month +1)/10)//1)
k = int(year % 100)   # year within the century
j = year / 100 //1    # century
h = int((q + a + k + (k / 4)//1 + (j/ 4)//1 + 5 * j) % 7)
# Zeller's h: 0 = Saturday, 1 = Sunday, 2 = Monday, ...
if h == 2:
    xq= 'Monday'
elif h == 3:
    xq= 'Tuesday'
elif h == 4:
    xq= 'Wednesday'
elif h == 5:
    xq= 'Thursday'
elif h == 6:
    xq= 'Friday'
elif h ==0:
    xq= 'Saturday'
elif h ==1:
    xq= 'Sunday'
print('Day of the week is %s' % xq)
```
- 10

```
# Homework 10: pick a random card (suit + rank).
import numpy as np
huase = np.random.choice(['梅花', '红桃', '方块', '黑桃'])
# Bug fix: a deck has 13 ranks -- the original listed both 'Ace' and '1' (14
# entries). Also use np.random.choice so this cell does not depend on the
# `random` import from an earlier cell.
daxiao = np.random.choice(['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King'])
print('The card you picked is the %s of %s ' % (daxiao, huase))
```
- 11

```
# Homework 11: palindrome check for a 3-digit number, then a triangle-perimeter check.
n = int(input('Enter a three-digit integer: '))
a = n//100%10   # hundreds digit
b = n//10%10    # tens digit
c = n%10        # ones digit
m = c * 100 + b * 10 + a   # digits reversed
if m ==n:
    print('%d is a palindrome' % n)
else :
    print('%d is not a palindrome'% n )
a,b,c = eval(input('Enter three edges:'))
l = a+b+c
# Triangle inequality: every pair of sides must exceed the third.
if a + b > c and a + c > b and b + c > a:
    print('The perimeter is %d'% l)
else :
    print ('输入非法')
```
| github_jupyter |
## Day 14
https://adventofcode.com/2020/day/14
```
# Fetch the day-14 puzzle input via advent-of-code-data (needs an AOC session token).
import aocd
lines = [line for line in aocd.get_data(day=14, year=2020).splitlines()]
len(lines)
lines[:5]
```
### Solution to Part 1
```
def maskable(value: int) -> list:
    """Return *value* as a list of 36 binary digit characters, MSB first."""
    return list(format(value, '036b'))

def mask_value(value: int, *, mask: str) -> int:
    """Apply a part-1 bitmask: '0'/'1' overwrite the bit, 'X' keeps the value bit."""
    bits = maskable(value)
    assert len(mask) == 36
    assert len(bits) == 36
    masked = [v if m == 'X' else m for m, v in zip(mask, bits)]
    return int(''.join(masked), 2)
# Worked example from the puzzle statement: 11 -> 73 and 101 -> 101 under this mask.
mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X'
mask_value(11, mask=mask)
mask_value(101, mask=mask)
def parse_line(line: str):
    """Parse one program line into ('mask', pattern) or ('mem', (addr, value))."""
    lhs, rhs = line.split(' = ')
    if lhs == 'mask':
        return ('mask', rhs)
    # lhs looks like 'mem[<addr>]'; strip 'mem[' and the trailing ']'.
    return ('mem', (int(lhs[4:-1]), int(rhs)))
def store_mem(lines):
    """Execute the part-1 program, yielding an (addr, masked_value) pair per write."""
    current_mask = None
    for line in lines:
        cmd, payload = parse_line(line)
        if cmd == 'mask':
            current_mask = payload
            continue
        if cmd != 'mem':
            raise RuntimeError('cannot happen')
        addr, value = payload
        yield addr, mask_value(value, mask=current_mask)
# Run the part-1 program; later writes to the same address overwrite earlier ones.
mem = {addr: value for addr, value in store_mem(lines)}
len(mem)
sum(mem.values())  # part-1 answer
```
### Solution to Part 2
```
def overwrite_floating(masked: str, *, bits: str) -> str:
    """Fill the 'X' positions of *masked* with *bits* (in order); return the address as an int."""
    assert masked.count('X') == len(bits)
    replacement = iter(bits)
    filled = [next(replacement) if ch == 'X' else ch for ch in masked]
    return int(''.join(filled), 2)

def floating_addrs(masked: str, *, floating: int) -> list:
    """Expand *masked* (containing `floating` 'X' bits) into every concrete address."""
    addrs = []
    for i in range(2 ** floating):
        pattern = format(i, 'b').zfill(floating)
        addrs.append(overwrite_floating(masked, bits=pattern))
    return addrs
def masked_addrs(addr: int, *, mask: str) -> list:
    """Apply a part-2 mask to *addr*: '0' keeps the bit, '1' forces 1, 'X' floats."""
    addr_bits = maskable(addr)
    assert len(mask) == 36
    assert len(addr_bits) == 36
    masked = []
    for mask_bit, addr_bit in zip(mask, addr_bits):
        if mask_bit == '0':
            masked.append(addr_bit)
        elif mask_bit in ('1', 'X'):
            masked.append(mask_bit)
        else:
            raise RuntimeError('cannot happen')
    floating = masked.count('X')
    if floating == 0:
        # No floating bits: the mask decodes to exactly one address.
        return [int(''.join(masked), 2)]
    return floating_addrs(masked, floating=floating)
# Worked part-2 example: address 42 under this mask decodes to 4 addresses.
mask = '000000000000000000000000000000X1001X'
len(masked_addrs(42, mask=mask))
def store_mem_part2(lines):
    """Execute the part-2 program, yielding (addr, value) for every decoded address."""
    current_mask = None
    for line in lines:
        cmd, payload = parse_line(line)
        if cmd == 'mask':
            current_mask = payload
            continue
        if cmd != 'mem':
            raise RuntimeError('cannot happen')
        addr, value = payload
        for decoded in masked_addrs(addr, mask=current_mask):
            yield decoded, value
# Run the part-2 program over every decoded address.
mem = {addr: value for addr, value in store_mem_part2(lines)}
len(mem)
sum(mem.values())  # part-2 answer
```
| github_jupyter |
# Lists
Data Structure:
A data structure is a collection of data elements (such as numbers or characters—or even other data structures) that is structured in some way, for example, by numbering the elements. The most basic data structure in Python is the "sequence".
-> List is one of the Sequence Data structure
-> Lists are collection of items (Strings, integers or even other lists)
-> Lists are enclosed in [ ]
-> Each item in the list has an assigned index value.
-> Each item in a list is separated by a comma
-> Lists are mutable, which means they can be changed.
# List Creation
```
# Ways to create lists: empty, homogeneous, nested, and mixed element types.
emptyList = []
lst = ['one', 'two', 'three', 'four'] # list of strings
lst2 = [1, 2, 3, 4] #list of integers
lst3 = [[1, 2], [3, 4]] # list of lists
lst4 = [1, 'ramu', 24, 1.24] # list of different datatypes
print(lst4)
```
# List Length
```
lst = ['one', 'two', 'three', 'four']
#find length of a list -- len() returns the number of top-level elements
print(len(lst))
```
# List Append
```
lst = ['one', 'two', 'three', 'four']
lst.append('five') # append will add the item at the end
print(lst)
```
# List Insert
```
#syntax: lst.insert(x, y)
lst = ['one', 'two', 'four']
lst.insert(2, "three") # will add element y at location x
print(lst)
```
# List Remove
```
#syntax: lst.remove(x)
lst = ['one', 'two', 'three', 'four', 'two']
lst.remove('two') #it will remove first occurence of 'two' in a given list
print(lst)
```
# List Append & Extend
```
lst = ['one', 'two', 'three', 'four']
lst2 = ['five', 'six']
#append
lst.append(lst2)
print(lst)
lst = ['one', 'two', 'three', 'four']
lst2 = ['five', 'six']
#extend will join the list with list1
lst.extend(lst2)
print(lst)
```
# List Delete
```
#del to remove item based on index position
lst = ['one', 'two', 'three', 'four', 'five']
del lst[1]
print(lst)
#or we can use pop() method
a = lst.pop(1)
print(a)
print(lst)
lst = ['one', 'two', 'three', 'four']
#remove an item from list
lst.remove('three')
print(lst)
```
# List related keywords in Python
```
#keyword 'in' is used to test if an item is in a list
lst = ['one', 'two', 'three', 'four']
if 'two' in lst:
print('AI')
#keyword 'not' can combined with 'in'
if 'six' not in lst:
print('ML')
```
# List Reverse
```
#reverse is reverses the entire list
lst = ['one', 'two', 'three', 'four']
lst.reverse()
print(lst)
```
# List Sorting
The easiest way to sort a List is with the sorted(list) function.
That takes a list and returns a new list with those elements in sorted order.
The original list is not changed.
The sorted() optional argument reverse=True, e.g. sorted(list, reverse=True),
makes it sort backwards.
```
#create a list with numbers
numbers = [3, 1, 6, 2, 8]
sorted_lst = sorted(numbers)
print("Sorted list :", sorted_lst)
#original list remain unchanged
print("Original list: ", numbers)
#print a list in reverse sorted order
print("Reverse sorted list :", sorted(numbers, reverse=True))
#orginal list remain unchanged
print("Original list :", numbers)
lst = [1, 20, 5, 5, 4.2]
#sort the list and stored in itself
lst.sort()
# add element 'a' to the list to show an error
print("Sorted list: ", lst)
lst = [1, 20, 'b', 5, 'a']
print(lst.sort()) # sort list with element of different datatypes.
```
# List Having Multiple References
```
lst = [1, 2, 3, 4, 5]
abc = lst
abc.append(6)
#print original list
print("Original list: ", lst)
```
# String Split to create a list
```
#let's take a string
s = "one,two,three,four,five"
slst = s.split(',')
print(slst)
s = "This is applied AI Course"
split_lst = s.split() # default split is white-character: space or tab
print(split_lst)
```
# List Indexing
Each item in the list has an assigned index value starting from 0.
Accessing elements in a list is called indexing.
```
lst = [1, 2, 3, 4]
print(lst[1]) #print second element
#print second-to-last element using negative index
print(lst[-2])
```
# List Slicing
Accessing parts of segments is called slicing.
The key point to remember is that the :end value represents the first value that
is not in the selected slice.
```
numbers = [10, 20, 30, 40, 50,60,70,80]
#print all numbers
print(numbers[:])
#print from index 0 to index 3
print(numbers[0:4])
print (numbers)
#print alternate elements in a list
print(numbers[::2])
#print alternate elements starting from index 2 through the rest of the list
print(numbers[2::2])
```
# List extend using "+"
```
lst1 = [1, 2, 3, 4]
lst2 = ['varma', 'naveen', 'murali', 'brahma']
new_lst = lst1 + lst2
print(new_lst)
```
# List Count
```
numbers = [1, 2, 3, 1, 3, 4, 2, 5]
#frequency of 1 in a list
print(numbers.count(1))
#frequency of 3 in a list
print(numbers.count(3))
```
# List Looping
```
#loop through a list
lst = ['one', 'two', 'three', 'four']
for ele in lst:
print(ele)
```
# List Comprehensions
List comprehensions provide a concise way to create lists.
Common applications are to make new lists where each element is the result of some operations applied to each member of another sequence or iterable, or to create a subsequence of those elements that satisfy a certain condition.
```
# without list comprehension
squares = []
for i in range(10):
squares.append(i**2) #list append
print(squares)
#using list comprehension
squares = [i**2 for i in range(10)]
print(squares)
#example
lst = [-10, -20, 10, 20, 50]
#create a new list with values doubled
new_lst = [i*2 for i in lst]
print(new_lst)
#filter the list to exclude negative numbers
new_lst = [i for i in lst if i >= 0]
print(new_lst)
#create a list of tuples like (number, square_of_number)
new_lst = [(i, i**2) for i in range(10)]
print(new_lst)
```
# Nested List Comprehensions
```
#let's suppose we have a matrix
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
]
#transpose of a matrix without list comprehension
transposed = []
for i in range(4):
lst = []
for row in matrix:
lst.append(row[i])
transposed.append(lst)
print(transposed)
#with list comprehension
transposed = [[row[i] for row in matrix] for i in range(4)]
print(transposed)
```
| github_jupyter |
<div align="right"><a href="https://github.com/lucasliano/Medidas1">Link Github</a></div>
<img src="logo.jpg" width="400"></img>
<div align="center">
<h1>Resúmen Teórico de Medidas Electrónicas 1</h1>
<h2>Incertidumbre</h2>
<h3>Liaño, Lucas</h3>
</div>
# Contenidos
- **Introducción**
- **Marco Teórico**
- Conceptos Básicos Metrología
- ¿Qué es la incertidumbre?
- Modelo matemático de una medición ($Y$)
- Evaluación incertidumbre Tipo A
- Evaluación incertidumbre Tipo B
- Incertidumbre Conjunta
- Grado de Confianza
- Caso de análisis: $u_{i}(x_{i}) \gg u_{j}(X_{i})$
- Caso de análisis: $u_{i}(x_{i}) \ll u_{j}(X_{i})$
- Correlación
- **Experimentación**
- Caso General
- Caso Incertidumbre tipo A dominante
- Caso Incertidumbre tipo B dominante
- Ejemplo Correlación
- **Bibliografía**
***
# Introducción
El objetivo del presente documento es de resumir, al mismo tiempo que simular, los contenidos teóricos correspondientes a la unidad N°1 de la materia medidas 1. Para ello, utilizaremos los recursos disponibles en el drive de la materia.
<div class="alert alert-success">
<strong>Link:</strong> <a href="https://drive.google.com/folderview?id=1p1eVB4UoS0C-5gyienup-XiewKsTpcNc">https://drive.google.com/folderview?id=1p1eVB4UoS0C-5gyienup-XiewKsTpcNc</a>
</div>
***
# Marco Teórico
## Conceptos Básicos Metrología
La de medición de una magnitud física, atributo de un cuerpo mensurable, consiste en el proceso mediante el cual se da a conocer el valor de dicha magnitud. A lo largo de la historia se han desarrollado diversos modelos de medición, todos ellos consisten en la comparación de la magnitud contra un patrón.
A su vez, a medida que se fueron confeccionando mejores métodos de medición, se empezó a tener en consideración el error en la medida. Este error consiste en una indicación cuantitativa de la calidad del resultado. Valor que demuestra la confiabilidad del proceso.
Actualmente, definimos al **resultado de una medición** como al conjunto de valores de una magnitud, atribuidos a un mensurando. Se puede definir a partir de una función distribución densidad de probabilidad (también denomidada _pdf_, de la sígla inglesa _probability density function_). El resultado de una medición está caracterizado por la media de la muestra, la incertidumbre y el grado de confianza de la medida.
Denominaremos **incertidumbre de una medición** al parámetro asociado con el resultado de la medición que caracteríza la dispersión de los valores atribuidos a un mensurando. Mientras que el **error de medida** será la diferencia entre el valor medido con un valor de referencia. [[1]](http://depa.fquim.unam.mx/amyd/archivero/CALCULODEINCERTIDUMBRESDR.JAVIERMIRANDA_26197.pdf)
#### Tipos de errores
Existen dos tipos:
> **Error sistemático:** Componente del error que en repetidas mediciones permanece constante.
> **Error aleatorio:** Componente del error que en repetidas mediciones varía de manera impredecible.
***
## ¿Qué es la incertidumbre?
Como bien definimos anteriormente, la incertidumbre es un parámetro que caracteríza la dispersión de los valores atribuidos a un mensurando. Esto significa que, considerando al resultado de la medición como una función distribución densidad de probabilidad, la incertidumbre representa el desvío estándar de la misma. Se suele denominar **incertidumbre estándar** a dicha expresión de la incertidumbre.
#### Componentes de la incertidumbre
> **Tipo A:** Componente de la incertidumbre descripta únicamente a partir del estudio estadístico de las muestras.
> **Tipo B:** Componente de la incertidumbre descripta a partir de las hojas de datos previstas por los fabricantes de los instrumentos de medición, junto con datos de calibración.
En las próximas secciones se describe en detalle como son los test efectuados para determinar cada una de las componentes. [[2]](https://es.wikipedia.org/wiki/Propagaci%C3%B3n_de_errores)
***
## Modelo matemático de una medición ($Y$)
Supongamos una magnitud a mensurar ($Y$), la cual se va a estimar de forma indirecta a partir de una relación fundamental con otras $N$ magnitudes mensurables, de manera que se cumple:
\begin{equation}
Y = f(x_{1},x_{2},...,x_{N})
\end{equation}
Como definimos previamente, las variables $x_{i}$ son funciones distribución densidad de probabilidad por ser resultados de mediciones. Cada una de estas mediciones viene determinada, idealmente, por el valor de su media ($\mu_{X_{i}}$), su desvío estándar ($\sigma_{x_{i}}$) y el grado de confianza de la medición. Dado que en la vida real no es posible conseguir una estimación lo suficientemente buena de estos parámetros, se utilizarán sus estimadores en su lugar.
Por tanto, si se tomaron $M$ muestras de cada una de estas variables, podemos utilizar la **media poblacional ($\bar{Y}$)** como estimador de la media ($\mu_{Y}$) de la distribución densidad de probabilidad de la medición como:
\begin{equation}
\hat{Y} = \bar{Y} = \frac{1}{M} \sum_{k=0}^{M} f_{k}(x_{1},x_{2},...,x_{N}) = f(\bar{X_{1}},\bar{X_{2}},...,\bar{X_{N}})
\end{equation}
<div class="alert alert-danger">
<strong>Verificar que esto este bien.</strong> Sospecho que no porque estamos suponiendo que podes aplicar linealidad adentro de la función. Estoy leyendo el ejemplo del calculo de resistencia y hacemos "resistencia= (media_V/media_I)" en la línea 39 del documento compartido en el canal general de Slack.
</div>
Asimismo, para determinar el otro parámetro fundamental de la medición (la incertidumbre) utilizaremos como estimador a la **incertidumbre combinada ($u_{c}$)** definida a partir de la siguiente ecuación,
\begin{equation}
u_{c}^{2}(Y) = \sum_{i=1}^{N} (\dfrac{\partial f}{\partial x_{i}})^{2} \cdot u_{c}^{2}(x_{i}) + 2 \sum_{i=1}^{N-1} \sum_{j = i+1}^{N} \dfrac{\partial f}{\partial x_{i}} \dfrac{\partial f}{\partial x_{j}} u(x_{i},x_{j})
\end{equation}
donde $u(x_{i},x_{j})$ es la expresión de la covariancia entre las pdf de las $x_{i}$.
Esta expresión, para permitir el uso de funciones $f_{k}$ no lineales, es la aproximación por serie de Taylor de primer orden de la expresión original para funciones que cumplen linealidad. [[2]](https://es.wikipedia.org/wiki/Propagaci%C3%B3n_de_errores)
A su vez, a partir de la **ley de propagación de incertidumbres**, podemos decir que para la determinación de una variable unitaria mediante medición directa es posible reducir la expresión anterior a la siguiente:
\begin{equation}
u_{c}^{2}(x_{i}) = u_{i}^{2}(x_{i}) + u_{j}^{2}(x_{i})
\end{equation}
donde denominaremos $u_{i}(x_{i})$ a la incertidumbre tipo A, y $u_{j}(x_{i})$ a la incertidumbre tipo B.
***
## Evaluación incertidumbre Tipo A
La incertidumbre tipo A, recordando que se trata de una medida de dispersión y al ser tipo A se relaciona con la estadística de las muestras, se puede estimar con el desvío estándar experimental de la media ($S(\bar{X_{i}})$). Para ello hace falta recordar algunos conceptos de estadística.
Suponiendo que se toman $N$ muestras:
> **Estimador media poblacional:**
>> $\hat{x_{i}}=\bar{X_{i}}=\dfrac{1}{N} \sum_{k=1}^{N}x_{i,k}$
> **Grados de libertad:**
>> $\nu = N-1$
> **Varianza experimental de las observaciones:**
>> $\hat{\sigma^{2}(X_{i})}=S^{2}(X_{i})=\dfrac{1}{\nu} \sum_{k=1}^{N}(X_{i,k} - \bar{X_{i}})^{2}$
> **Varianza experimental de la media:**
>> $\hat{\sigma^{2}(\bar{X_{i}})}=S^{2}(\bar{X_{i}})=\dfrac{S^{2}(x_{i})}{N}$
<div class="alert alert-success">
<strong>Por ende, la componente de la incertidumbre tipo A nos queda:</strong>
\begin{equation}
u_{i}(x_{i}) = \sqrt{S^{2}(\bar{X_{i}})}
\end{equation}
</div>
<div class="alert alert-info">
<strong>Nota:</strong> Para calcular el std con un divisor de $\nu = N-1$ es necesario modificar un argumento en la función de python. El comando correctamente utilizado es: 'myVars.std(ddof=1)'.
</div>
***
## Evaluación incertidumbre Tipo B
La incertidumbre tipo B viene determinada por la información que proveen los fabricantes de los instrumentos de medición, asi como también por los datos resultantes por la calibración de los mismos.
En estos instrumentos de medición la incertidumbre viene descripta en forma de distribuciones densidad de probabilidad, no en forma estadística. Para ello utilizamos los siguientes estadísticos que caracterízan a las variables aleatorias, en caso de que su dominio fuera continuo:
> **Esperanza:**
>> $E(x)=\int x.f(x)dx$
> **Varianza:**
>> $V(x)=\int (x-E(x))^{2}\,f(x)\,dx$
<div class="alert alert-success">
<strong>Por tanto, si la incertidumbre es un parámetro de dispersión, la misma vendrá descripta por la expresión:</strong>
\begin{equation}
u_{j}(x_{i}) = \sqrt{V(x)}
\end{equation}
</div>
Por simplicidad a la hora de trabajar, a continuación se presenta una tabla con los valores típicos del desvío estándar para el caso de distintas distribuciones. Se demuestra el caso de distribución uniforme.

Suponiendo que la distribución está centrada en $\bar{X_{i}}$, nos quedaría que $a = \bar{X_{i}} - \Delta X$ y $b = \bar{X_{i}} + \Delta X$.
Por tanto si la expresión de la varianza es $V(x_{i}) = \frac{(b-a)^{2}}{12}$, finalmente quedaría:
\begin{equation}
V(x_{i}) = \frac{(b-a)^{2}}{12} = \frac{(2 \Delta X)^{2}}{12} = \frac{4 \Delta X^{2}}{12} = \frac{\Delta X^{2}}{3}
\end{equation}
\begin{equation}
\sigma_{x_{i}} = \frac{\Delta X}{\sqrt{3}}
\end{equation}
Finalmente la tabla queda,
| Distribution | $u_{j}(x_{i}) = \sigma_{x_{i}}$|
| :----: | :----: |
| Uniforme | $\frac{\Delta X}{\sqrt{3}}$ |
| Normal | $\Delta X $ |
| Normal ($K=2$) | $\frac{\Delta X}{2} $ |
| Triangular | $\frac{\Delta X}{\sqrt{6}}$ |
| U | $\frac{\Delta X}{\sqrt{2}}$ |
<div class="alert alert-danger">
<strong>Verificar que esto este bien.</strong> Me genera dudas el término $\Delta X$. Esto no creo que deba ser así porque en el caso de la distribución normal $\sigma_{x_{i}} = \sigma$. No creo que deba aparecer ningun error absoluto ahí.
</div>
***
## Incertidumbre Conjunta
Como definimos anteriormente, la incertidumbre conjunta queda definida como:
\begin{equation}
u_{c}^{2}(x_{i}) = u_{i}^{2}(x_{i}) + u_{j}^{2}(x_{i})
\end{equation}
#### ¿Qué función distribución densidad de probabilidad tiene $u_{c}$?
Si se conocen $x_{1},x_{2},...,x_{N}$ y $Y$ es una combinación lineal de $x_{i}$ (o en su defecto una aproximación lineal, como en el caso del polinomio de taylor de primer grado de la función), podemos conocer la función distribución densidad de probabilidad a partir de la convolución de las $x_{i}$, al igual que se hace para SLIT. [[3]](https://es.wikipedia.org/wiki/Convoluci%C3%B3n)
Dado que habitualmente no se conoce con precisión la función distribución densidad de probabilidad de $u_{i}(x_{i})$, se suele utilizar el **teorema central del límite** para conocer $u_{c}(x_{i})$. El mismo plantea que cuantas más funciones $x_{i}$ con función distribución densidad de probabilidad deconocida sumemos, más va a tender su resultado a una distribución normal.
***
## Grado de Confianza
Finalmente, el último parámetro que nos interesa conocer para determinar el resultado de la medición es el grado de confianza.
> **Grado de confianza:** Es la probabilidad de que al evaluar nuevamente la media poblacional ($\bar{y}$) nos encontremos con un valor dentro del intervalo $[\bar{Y} - K.\sigma_{Y}(\bar{Y}) \le \mu_{Y} \le \bar{Y} + K.\sigma_{Y}(\bar{Y})]$ para el caso de una distribución que cumpla el teorema central del límite, donde $K$ es el factor de cobertura.
Otra forma de verlo es:

donde el grado de confianza viene representado por $(1-\alpha)$. Recomiendo ver el ejemplo [[4]](https://es.wikipedia.org/wiki/Intervalo_de_confianza#Ejemplo_pr%C3%A1ctico) en caso de no entender lo que representa.
De esta forma, el factor de cobertura ($K$) nos permite modificar el grado de confianza. Agrandar $K$ aumentará el área bajo la curva de la gaussiana, lo que representará un mayor grado de confianza.
Se definirá **incertidumbre expandida** a $U(x_{i}) = K \cdot u_{c}(x_{i})$ si $u_{c}(x_{i})$ es la incertidumbre que nos proveé un grado de confianza de aproximadamente $ 68\% $.
Para una función que distribuye como normal podemos estimar el grado de confianza mediante la siguiente tabla,
| Factor de cobertura | Grado de confianza|
| :----: | :----: |
| $K=1$ | $68.26\% $ |
| $K=2$ | $95.44\% $ |
| $K=3$ | $99.74\% $ |
#### ¿Qué sucede si $u_{c}$ no distribuye normalmente?
En este caso también se podrá utilizar la ecuación $U(x_{i}) = K \cdot u_{c}(x_{i})$, pero el método mediante el cual obtendremos a $K$ será distinto.
***
## Caso de análisis: $u_{i}(x_{i}) \gg u_{j}(X_{i})$
Cuando sucede que la incertidumbre que proveé la evaluación tipo A es muy significativa frente a la tipo B, esto querrá decir que no tenemos suficientes grados de libertad para que $u_{c}(x_{i})$ se aproxime a una gaussiana. En otras palabras, la muestra obtenida no es significativa.
En estos casos vamos a suponer que $u_{c}(x_{i})$ distribuye como t-Student. La distribución t-Student surge justamente del problema de estimar la media de una población normalmente distribuida cuando el tamaño de la muestra es pequeño.
Como la distribución de t-Student tiene como parámetro los grados de libertad efectivos, debemos calcularlos. Para ello utilizaremos la fórmula de Welch-Satterthwaite:
\begin{equation}
\nu_{eff} = \dfrac{u_{c}^{4}(y)}{\sum_{i=1}^{N} \dfrac{ c_{i}^{4} u^{4}(x_{i})} {\nu_{i}} }
\end{equation}
donde $c_i = \dfrac{\partial f}{\partial x_{i}}$ y $u_{i}(x_{i})$ es la incertidumbre tipo A.

Para obtener el factor de cobertura que nos asegure un grado de confianza del $95.4\%$ debemos recurrir a la tabla de t-Student. Para ello existe una función dentro del módulo _scipy.stats_ que nos integra la función hasta lograr un área del $95.4\%$.
A continuación presentamos la función que utilizaremos con dicho fin,
~~~
def get_factor_Tstudent(V_eff, porcentaje_confianza_objetivo=95.4):
"""
Funcion de calculo de factor de expansión por T-student
input:
V_eff: Grados de libertad (float)
porcentaje_confianza_objetivo: porcentaje_confianza_objetivo (float)
returns:
Factor de expansión (float)
"""
return np.abs( -(stats.t.ppf((1.0+(porcentaje_confianza_objetivo/100))/2.0,V_eff)) )
~~~
***
## Caso de análisis: $u_{i}(x_{i}) \ll u_{j}(X_{i})~$
Para el caso en el que la incertidumbre del muestreo es muy inferior a la incertidumbre tipo B, nos encontramos frente al caso de incertidumbre B dominante. Esta situación es equivalente a tener la convolución entre una delta de dirac con una función de distribución cualquiera.

Como observamos en la imagen, la función distribución densidad de probabilidad resultante se asemeja más a la distribución uniforme del tipo B. En este caso para encontrar el factor de cobertura utilizaremos otra tabla distinta. En esta tabla el parámetro de entrada es el cociente $\dfrac{u_{i}}{u_{j}}$.
A continuación presentamos la función que utilizaremos con dicho fin,
~~~
def tabla_B(arg):
tabla_tipoB = np.array([
[0.0, 1.65],
[0.1, 1.66],
[0.15, 1.68],
[0.20, 1.70],
[0.25, 1.72],
[0.30, 1.75],
[0.35, 1.77],
[0.40, 1.79],
[0.45, 1.82],
[0.50, 1.84],
[0.55, 1.85],
[0.60, 1.87],
[0.65, 1.89],
[0.70, 1.90],
[0.75, 1.91],
[0.80, 1.92],
[0.85, 1.93],
[0.90, 1.94],
[0.95, 1.95],
[1.00, 1.95],
[1.10, 1.96],
[1.20, 1.97],
[1.40, 1.98],
[1.80, 1.99],
[1.90, 1.99]])
if arg >= 2.0:
K = 2.0
else:
pos_min = np.argmin(np.abs(tabla_tipoB[:,0]-arg))
K = tabla_tipoB[pos_min,1]
return K
~~~
***
## Correlación
Finalmente nos encontramos con el caso mas general. En esta situación las variables se encuentran correlacionadas, por lo que la expresión de $u_{c}(Y)$ debe utilizarse en su totalidad.
Por simplicidad de computo vamos a definir al coeficiente correlación como,
\begin{equation}
r(q,w) = \dfrac{ u(q,w) }{ u(q)u(w) }
\end{equation}
De esta forma podemos expresar a $u_{c}$ como:
\begin{equation}
u_{c}^{2}(Y) = \sum_{i=1}^{N} (\dfrac{\partial f}{\partial x_{i}})^{2} \cdot u_{c}^{2}(x_{i}) + 2 \sum_{i=1}^{N-1} \sum_{j = i+1}^{N} \dfrac{\partial f}{\partial x_{i}} \dfrac{\partial f}{\partial x_{j}} r(x_{i},x_{j})u(x_{i})u(x_{j})
\end{equation}
Esta expresión debe utilizarse cada vez que $r(x_{i},x_{j}) \ne 0$.
# Experimentación
**Comenzamos inicializando los módulos necesarios**
```
# módulos genericos
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy import signal
# Módulos para Jupyter (mejores graficos!)
import warnings
warnings.filterwarnings('ignore')
plt.rcParams['figure.figsize'] = [12, 4]
plt.rcParams['figure.dpi'] = 150 # 200 e.g. is really fine, but slower
from pandas import DataFrame
from IPython.display import HTML
```
**Definimos las funciones previamente mencionadas**
```
# Tabla para el caso A dominante
# Tabla para el caso A dominante
def get_factor_Tstudent(V_eff, porcentaje_confianza_objetivo=95.4):
    """Coverage factor K from the two-sided Student-t distribution.

    Parameters
    ----------
    V_eff : float
        Effective degrees of freedom (Welch-Satterthwaite).
    porcentaje_confianza_objetivo : float, optional
        Target confidence level, in percent (default 95.4).

    Returns
    -------
    float
        Expansion (coverage) factor K such that mean +/- K*u_c covers the
        requested confidence level.
    """
    # Two-sided interval: half of (1 - confidence) sits in each tail, so
    # we need the quantile at (1 + confidence) / 2.
    p = (1.0 + porcentaje_confianza_objetivo / 100.0) / 2.0
    # ppf of an upper-half quantile is already positive; abs() guards
    # against pathological confidence inputs (original had a redundant
    # negate-then-abs).
    return np.abs(stats.t.ppf(p, V_eff))
# Tabla para el caso B dominante
def tabla_B(arg):
tabla_tipoB = np.array([
[0.0, 1.65],
[0.1, 1.66],
[0.15, 1.68],
[0.20, 1.70],
[0.25, 1.72],
[0.30, 1.75],
[0.35, 1.77],
[0.40, 1.79],
[0.45, 1.82],
[0.50, 1.84],
[0.55, 1.85],
[0.60, 1.87],
[0.65, 1.89],
[0.70, 1.90],
[0.75, 1.91],
[0.80, 1.92],
[0.85, 1.93],
[0.90, 1.94],
[0.95, 1.95],
[1.00, 1.95],
[1.10, 1.96],
[1.20, 1.97],
[1.40, 1.98],
[1.80, 1.99],
[1.90, 1.99]])
if arg >= 2.0:
K = 2.0
else:
pos_min = np.argmin(np.abs(tabla_tipoB[:,0]-arg))
K = tabla_tipoB[pos_min,1]
return K
```
## Caso general
**Definimos las constantes necesarias**
```
# Constantes del instrumento
CONST_ERROR_PORCENTUAL = 0.5 # Error porcentual del instrumento de medición
CONST_ERROR_CUENTA = 3 # Error en cuentas del instrumento de medición
CONST_DECIMALES = 2 # Cantidad de decimales que representa el instrumento
# Constantes del muestro
N = 10 # Cantidad de muestras tomadas
# Señal a muestrear idealizada
mu = 100 # Valor medio de la distribución normal de la población ideal
std = 2 # Desvío estándar de la distribución normal de la población ideal
# Muestreo mi señal ideal (Normal)
muestra = np.random.randn(N) * std + mu
```
**Ahora solamente genero un gráfico que compare el histograma con la distribución normal de fondo**
```
num_bins = 50
fig, ax = plt.subplots()
# the histogram of the 1.1data
n, bins, patches = ax.hist(muestra, num_bins, density=True)
# add a 'best fit' line
y = ((1 / (np.sqrt(2 * np.pi) * std)) *
np.exp(-0.5 * (1 / std * (bins - mu))**2))
ax.plot(bins, y, '--')
ax.set_xlabel('Smarts')
ax.set_ylabel('Probability density')
ax.set_title('Histogram of IQ: $\mu=$'+ str(mu) + ', $\sigma=$' + str(std))
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
media = np.round(muestra.mean(), CONST_DECIMALES) # Redondeamos los decimales a los valores que puede ver el tester
desvio = muestra.std(ddof=1)
print("Mean:",media )
print("STD:" ,desvio)
```
**Calculamos el desvío estándar experimental de la media como:**
\begin{equation}
u_{i}(x_{i}) = \sqrt{S^{2}(\bar{X_{i}})}
\end{equation}
```
#Incertidumbre Tipo A
ui = desvio/np.sqrt(N)
ui
```
**Calculamos el error porcentual total del dispositivo de medición como:**
\begin{equation}
e_{\%T} = e_{\%} + \dfrac{e_{cuenta}\cdot 100\%}{\bar{X_{i}}(10^{cte_{Decimales}})}
\end{equation}
```
#Incertidumbre Tipo B
ERROR_PORCENTUAL_CUENTA = (CONST_ERROR_CUENTA*100)/(media * (10**CONST_DECIMALES ))
ERROR_PORCENTUAL_TOTAL = CONST_ERROR_PORCENTUAL + ERROR_PORCENTUAL_CUENTA
ERROR_PORCENTUAL_CUENTA
```
**Por tanto el error absoluto se representa como:**
\begin{equation}
\Delta X = e_{\%T} \dfrac{\bar{X_{i}}}{100\%}
\end{equation}
```
deltaX = ERROR_PORCENTUAL_TOTAL * media/100
deltaX
```
**Finalmente la incertidumbre tipo B queda:**
\begin{equation}
u_{j}(x_{i}) = \sqrt{Var(x_{i})} = \dfrac{\Delta X}{\sqrt{3}}
\end{equation}
donde recordamos que, al suponer una distribución uniforme en el dispositivo de medición, la varianza nos queda $Var(X_{uniforme}) = \dfrac {(b-a)^{2}}{12}$.
```
uj = deltaX / np.sqrt(3)
uj
```
**Calculamos la incertidumbre conjunta**
Como este es el caso de una medición directa de una sola variable, la expresión apropiada es:
\begin{equation}
u_{c}^{2}(x_{i}) = u_{i}^{2}(x_{i}) + u_{j}^{2}(x_{i})
\end{equation}
```
#incertidumbre combinada
uc = np.sqrt(ui**2 + uj**2)
uc
```
**Ahora debemos evaluar frente a que caso nos encontramos**
En primera instancia evaluamos que componente de la incertidumbre es mayoritaria y en que proporción.
Entonces tenemos tres situaciones posibles:
1. **Caso B dominante** $\Rightarrow \dfrac{u_{i}(x_{i})}{u_{j}(x_{i})} \lt 1 \Rightarrow$ Se utiliza la tabla de B dominante.
1. **Caso Normal** $\Rightarrow \dfrac{u_{i}(x_{i})}{u_{j}(x_{i})} \gt 1$ y $V_{eff} \gt 30 \Rightarrow$ Se toma $K=2$.
1. **Caso A dominante** $\Rightarrow \dfrac{u_{i}(x_{i})}{u_{j}(x_{i})} \gt 1$ y $V_{eff} \lt 30 \Rightarrow$ Se utiliza t-Student con los grados de libertad efectivos.
```
def evaluacion(uc, ui, uj, N):
    """Pick the coverage factor K according to which uncertainty dominates.

    Type-B dominant (ui/uj <= 1): dedicated table lookup via tabla_B.
    Otherwise the effective degrees of freedom decide between the normal
    approximation (K = 2, veff > 30) and the Student-t factor.
    """
    ratio = ui / uj
    print("Constante de proporcionalidad", ratio)
    if ratio <= 1:
        # Caso B dominante
        k = tabla_B(ratio)
    else:
        # Welch-Satterthwaite effective degrees of freedom; only the
        # type-A term contributes (uj has infinite degrees of freedom).
        veff = int((uc ** 4) / ((ui ** 4) / (N - 1)))
        print("Grados efectivos: ", veff)
        k = 2 if veff > 30 else get_factor_Tstudent(veff)
    print("Constante de expansión: ", k)
    return k
```
<div class="alert alert-warning">
<strong>Nota:</strong> La contribución de $u_{j}(x_{i})$ no se tiene en cuenta dado que, al ser una distribución continua, tiene infinitos grados de libertad.
\begin{equation}
\nu_{eff} = \dfrac{u_{c}^{4}(y)}{\sum_{i=1}^{N} \dfrac{ c_{i}^{4} u^{4}(x_{i})} {\nu_{i}} }
\end{equation}
</div>
```
k = evaluacion(uc,ui,uj,N)
```
**Análisis y presentación del resultado**
Como el cociente $\dfrac{u_{i}(x_{i})}{u_{j}(x_{i})} \gt 2$, entonces suponemos que nos encontramos frente al caso de distribución normal o distribución t-Student. Para ello utilizamos el criterio de los grados de libertad efectivos.
En este caso los grado de libertad efectivos $V_{eff} \gt 30$, por lo que suponemos distribución normal.
Finalmente presentamos el resultado con 1 dígito significativo.
```
U = uc*k
print("Resultado de la medición: (",np.round(media,1),"+-",np.round(U,1),")V con un grado de confianza del 95%")
```
# Bibliografía
_Nota: Las citas **no** respetan el formato APA._
1. [Evaluación de la Incertidumbre en Datos Experimentales, Javier Miranda Martín del Campo](http://depa.fquim.unam.mx/amyd/archivero/CALCULODEINCERTIDUMBRESDR.JAVIERMIRANDA_26197.pdf)
1. [Propagación de errores, Wikipedia](https://es.wikipedia.org/wiki/Propagaci%C3%B3n_de_errores)
1. [Convolución, Wikipedia](https://es.wikipedia.org/wiki/Convoluci%C3%B3n)
1. [Intervalo de Confianza, Wikipedia](https://es.wikipedia.org/wiki/Intervalo_de_confianza#Ejemplo_pr%C3%A1ctico)
| github_jupyter |
```
import sys
import os
import pandas as pd
import matplotlib.pyplot as plt
# parentDir = os.path.dirname(os.getcwd())
# sys.path.insert(0,parentDir )
myMods = os.path.join(os.getcwd(), "myMods")
sys.path.insert(0,myMods)
import mainFun.apiFix as apiFix
import mainFun.createReport as createReport
import mainFun.getView as getView
import mainFun.loadHelper as loadHelper
import mainFun.vennPlot as vennPlot
import utils
api_df1 = loadHelper.loadPickle('rawData/api_df1.pkl')
api_df2 = loadHelper.loadPickle('rawData/api_df2.pkl')
plt.rcParams["figure.figsize"] = (20,10)
os.getcwd()
# https://www.arothuis.nl/posts/one-off-docker-images/
# Create output and input volume.
# Put also the main
# docker run -v $pwd/output:/output -v $pwd/input:/input example3
# docker run -b C:/Users/Lionel/Python/dockerPractice/output:output C:/Users/Lionel/Python/dockerPractice/input:input example4
currentDir = os.getcwd()
# Load Data
p_file1 = 'rawData/api_storage_dict1.pkl'
p_file2 = 'rawData/api_storage_dict2.pkl'
api_storage_dict1 = loadHelper.loadPickle(p_file1)
api_storage_dict2 = loadHelper.loadPickle(p_file2)
# api_df1 = loadHelper.loadAPI1DF(p_file1)
# api_df2 = loadHelper.loadAPI2DF(p_file2)
# pickle.dump( api_df1, open( 'rawData/api_df1.pkl', "wb" ) )
# pickle.dump( api_df2, open( 'rawData/api_df2.pkl', "wb" ) )
api_df1 = loadHelper.loadPickle('rawData/api_df1.pkl')
api_df2 = loadHelper.loadPickle('rawData/api_df2.pkl')
title = 'DRE Report 1'
website = 'https://www.drevidence.com/'
# Create a PDF object
pdf = createReport.DREPDF('P', 'mm', 'Letter')
pdf.set_auto_page_break(auto = True, margin = 15)
pdf.add_font('DejaVu', '', 'ect/DejaVuSansCondensed.ttf', uni=True)
pdf.set_font('DejaVu', '', 14)
# metadata
pdf.set_title(title)
pdf.set_author('DRE')
pdf.set_website(website)
pdf.add_page()
txt = """
I am going to compare the results of two seperate APIs to see if they agree a term has a single Sense.ID
Report on how lists of strings map to Sense.Id using 2 APIs.
API1: DocAnalytics
* This api is how docanalytics identifies terms.
* E.G. https://caladan.doctorevidence.com/portal/suggestions?search={stroke}
API2: DocSearch
* This api is how docsearch identifies terms.
* E.G. https://search.doctorevidence.com/api/annotator/batch-annotate
"""
pdf.multi_cell(0, 5, txt)
pdf.add_page()
txt = """
How many items are matched in API1
"""
pdf.multi_cell(0, 5, txt)
df = pd.crosstab(index = api_df1['File'], columns = 'Count')
df['File'] = df.index
df = df[['File', 'Count']]
data = df.values.tolist()
data.insert(0, df.columns.to_list())
pdf.create_table(table_data = data,title='API1 retreval table', cell_width='uneven')
pdf.add_page()
txt = """
How many items are matched in API2
"""
pdf.multi_cell(0, 5, txt)
df = pd.crosstab(index = api_df2['File'], columns = 'Count')
df['File'] = df.index
df = df[['File', 'Count']]
data = df.values.tolist()
data.insert(0, df.columns.to_list())
pdf.create_table(table_data = data,title='API1 retreval table', cell_width='uneven')
pdf.add_page()
txt = """
How many items are matched in API1
"""
pdf.multi_cell(0, 5, txt)
df= pd.crosstab(index = api_df1['File'], columns = api_df1['N_IDS'])
df['File'] = df.index
data = df.values.tolist()
data.insert(0, df.columns.to_list())
pdf.create_table(table_data = data,title='API1 retreval table', cell_width='uneven')
pdf.add_page()
txt = """
How many items are matched in API2
"""
pdf.multi_cell(0, 5, txt)
df= pd.crosstab(index = api_df2['File'], columns = api_df2['N_IDS'])
df['File'] = df.index
data = df.values.tolist()
data.insert(0, df.columns.to_list())
pdf.create_table(table_data = data,title='API2 retreval table', cell_width='uneven')
pdf.add_page()
plt = vennPlot.plotVenn1(api_df1, api_df2)
plt.savefig('output/images/filename.png', bbox_inches='tight')
pdf.add_page(orientation = 'Landscape')
w = 250
h = w*2/3
pdf.image('output/images/filename.png', w = w, h = h,x=0, y=40)
pdf.output('output/template.pdf', 'F')
```
| github_jupyter |
### Analyze Auto sales trend and verify if RCF detects abrupt shift in sales
#### Years: 2005 to 2020. This period covers recession due to housing crisis in 2008, followed by recovery and economic impact due to Covid
### Data Source: Monthly New Vehicle Sales for the United States Automotive Market
### https://www.goodcarbadcar.net/usa-auto-industry-total-sales-figures/
### Raw data: http://www.bea.gov/
```
import sys
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.dpi'] = 100
import boto3
import botocore
import sagemaker
from sagemaker import RandomCutForest
# S3 bucket/prefix where training input and model output will live
bucket = sagemaker.Session().default_bucket() # Feel free to change to another bucket you have access to
prefix = 'sagemaker/autosales'
execution_role = sagemaker.get_execution_role()
# check if the bucket exists
try:
    boto3.Session().client('s3').head_bucket(Bucket=bucket)
except botocore.exceptions.ParamValidationError as e:
    print('Hey! You either forgot to specify your S3 bucket'
          ' or you gave your bucket an invalid name!')
except botocore.exceptions.ClientError as e:
    # 403 = bucket exists but we lack access; 404 = bucket does not exist;
    # anything else is unexpected and re-raised.
    if e.response['Error']['Code'] == '403':
        print("Hey! You don't have permission to access the bucket, {}.".format(bucket))
    elif e.response['Error']['Code'] == '404':
        print("Hey! Your bucket, {}, doesn't exist!".format(bucket))
    else:
        raise
else:
    print('Training input/output will be stored in: s3://{}/{}'.format(bucket, prefix))
%%time
# Load the monthly auto sales series and plot it
data_filename = 'auto_sales_year_month.csv'
df = pd.read_csv(data_filename)
df.shape
df
plt.plot(df['value'])
plt.ylabel('Sales')
plt.title('Monthly Auto Sales - USA')
plt.show()
```
### Big increase in autosales Feb-2012
https://www.theautochannel.com/news/2012/03/02/027504-february-2012-u-s-auto-sales-highest-4-years.html
```
df[75:90]
```
### U.S. Auto Sales Hit Record Low In April 2020
#### Coronavirus Chaos Also Drives Zero-Interest Deals to Record Highs
https://www.edmunds.com/car-news/us-auto-sales-hit-record-low-in-april.html
```
df[175:]
```
# Training
***
Next, we configure a SageMaker training job to train the Random Cut Forest (RCF) algorithm on the monthly auto sales data.
## Hyperparameters
Particular to a SageMaker RCF training job are the following hyperparameters:
* **`num_samples_per_tree`** - the number of randomly sampled data points sent to each tree. As a general rule, `1/num_samples_per_tree` should approximate the estimated ratio of anomalies to normal points in the dataset.
* **`num_trees`** - the number of trees to create in the forest. Each tree learns a separate model from different samples of data. The full forest model uses the mean predicted anomaly score from each constituent tree.
* **`feature_dim`** - the dimension of each data point.
In addition to these RCF model hyperparameters, we provide additional parameters defining things like the EC2 instance type on which training will run, the S3 bucket containing the data, and the AWS access role. Note that,
* Recommended instance type: `ml.m4`, `ml.c4`, or `ml.c5`
* Current limitations:
* The RCF algorithm does not take advantage of GPU hardware.
```
# Use Spot Instance - Save up to 90% of training cost by using spot instances when compared to on-demand instances
# Reference: https://github.com/aws-samples/amazon-sagemaker-managed-spot-training/blob/main/xgboost_built_in_managed_spot_training_checkpointing/xgboost_built_in_managed_spot_training_checkpointing.ipynb
# if you are still on two-month free-tier you can use the on-demand instance by setting:
# use_spot_instances = False
# We will use spot for training
use_spot_instances = True
max_run = 3600 # in seconds
max_wait = 3600 if use_spot_instances else None # in seconds
job_name = 'rcf-autosales-1yr'
checkpoint_s3_uri = None
if use_spot_instances:
checkpoint_s3_uri = f's3://{bucket}/{prefix}/checkpoints/{job_name}'
print (f'Checkpoint uri: {checkpoint_s3_uri}')
# SDK 2.0
session = sagemaker.Session()
# specify general training job information
# 48 samples = 48 Months of data
rcf = RandomCutForest(role=execution_role,
instance_count=1,
instance_type='ml.m4.xlarge',
data_location='s3://{}/{}/'.format(bucket, prefix),
output_path='s3://{}/{}/output'.format(bucket, prefix),
num_samples_per_tree=48,
num_trees=50,
base_job_name = job_name,
use_spot_instances=use_spot_instances,
max_run=max_run,
max_wait=max_wait,
checkpoint_s3_uri=checkpoint_s3_uri)
# automatically upload the training data to S3 and run the training job
rcf.fit(rcf.record_set(df.value.to_numpy().reshape(-1,1)))
rcf.hyperparameters()
print('Training job name: {}'.format(rcf.latest_training_job.job_name))
```
# Inference
***
A trained Random Cut Forest model does nothing on its own. We now want to use the model we computed to perform inference on data. In this case, it means computing anomaly scores from input time series data points.
We create an inference endpoint using the SageMaker Python SDK `deploy()` function from the job we defined above. We specify the instance type where inference is computed as well as an initial number of instances to spin up. We recommend using the `ml.c5` instance type as it provides the fastest inference time at the lowest cost.
```
rcf_inference = rcf.deploy(
initial_instance_count=1,
instance_type='ml.m5.xlarge',
endpoint_name = job_name)
```
Congratulations! You now have a functioning SageMaker RCF inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console and selecting the endpoint matching the endpoint name, below:
```
print('Endpoint name: {}'.format(rcf_inference.endpoint_name))
```
## Data Serialization/Deserialization
We can pass data in a variety of formats to our inference endpoint. In this example we will demonstrate passing CSV-formatted data. Other available formats are JSON-formatted and RecordIO Protobuf. We make use of the SageMaker Python SDK utilities `csv_serializer` and `json_deserializer` when configuring the inference endpoint.
```
# SDK 2.0 serializers
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import JSONDeserializer
rcf_inference.serializer = CSVSerializer()
rcf_inference.deserializer = JSONDeserializer()
```
Let's pass the training dataset, in CSV format, to the inference endpoint so we can automatically detect the anomalies we saw with our eyes in the plots, above. Note that the serializer and deserializer will automatically take care of the datatype conversion from Numpy NDArrays.
For starters, let's only pass in the first six datapoints so we can see what the output looks like.
```
df_numpy = df.value.to_numpy().reshape(-1,1)
print(df_numpy[:6])
results = rcf_inference.predict(df_numpy[:6])
print(results)
```
## Computing Anomaly Scores
Now, let's compute and plot the anomaly scores from the entire auto sales dataset.
```
# Score every point in the series against the trained RCF model
results = rcf_inference.predict(df_numpy)
scores = [datum['score'] for datum in results['scores']]
# add scores to the auto sales data frame and print first few values
df['score'] = pd.Series(scores, index=df.index)
df.head()
# Plot sales (left axis) and anomaly score (right axis) on a shared x-axis
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
#
# *Try this out* - change `start` and `end` to zoom in on the
# anomaly found earlier in this notebook
#
start, end = 0, len(df)
df_subset = df[start:end]
ax1.plot(df_subset['value'], color='C0', alpha=0.8)
ax2.plot(df_subset['score'], color='C1')
ax1.grid(which='major', axis='both')
ax1.set_ylabel('Auto Sales', color='C0')
ax2.set_ylabel('Anomaly Score', color='C1')
ax1.tick_params('y', colors='C0')
ax2.tick_params('y', colors='C1')
# Leave 40% headroom above the largest score so the curve is not clipped
ax2.set_ylim(min(scores), 1.4*max(scores))
fig.set_figwidth(10)
```
Note that the anomaly score spikes where our eyeball-norm method suggests there is an anomalous data point as well as in some places where our eyeballs are not as accurate.
Below we print and plot any data points with scores greater than 3 standard deviations (approx 99.9th percentile) from the mean score.
```
# Flag points whose score is more than 3 standard deviations above the mean
score_mean = df['score'].mean()
score_std = df['score'].std()
score_cutoff = score_mean + 3*score_std
anomalies = df_subset[df_subset['score'] > score_cutoff]
anomalies
score_mean, score_std, score_cutoff
# Overlay the anomalous points (black dots) on the earlier two-axis figure
ax2.plot(anomalies.index, anomalies.score, 'ko')
fig
```
With the current hyperparameter choices we see that the three-standard-deviation threshold, while able to capture the known anomalies as well as the ones apparent in the sales plot, is rather sensitive to fine-grained perturbations and anomalous behavior. Adding trees to the SageMaker RCF model could smooth out the results as well as using a larger data set.
## Stop and Delete the Endpoint
Finally, we should delete the endpoint before we close the notebook.
To do so execute the cell below. Alternately, you can navigate to the "Endpoints" tab in the SageMaker console, select the endpoint with the name stored in the variable `endpoint_name`, and select "Delete" from the "Actions" dropdown menu.
```
# SDK 2.0
rcf_inference.delete_endpoint()
```
# Epilogue
---
We used Amazon SageMaker Random Cut Forest to detect anomalous datapoints in a monthly auto sales dataset. In these data the anomalies occurred when sales were uncharacteristically high or low. However, the RCF algorithm is also capable of detecting when, for example, data breaks periodicity or uncharacteristically changes global behavior.
Depending on the kind of data you have there are several ways to improve algorithm performance. One method, for example, is to use an appropriate training set. If you know that a particular set of data is characteristic of "normal" behavior then training on said set of data will more accurately characterize "abnormal" data.
Another improvement is make use of a windowing technique called "shingling". This is especially useful when working with periodic data with known period, such as the NYC taxi dataset used above. The idea is to treat a period of $P$ datapoints as a single datapoint of feature length $P$ and then run the RCF algorithm on these feature vectors. That is, if our original data consists of points $x_1, x_2, \ldots, x_N \in \mathbb{R}$ then we perform the transformation,
```
data = [[x_1], shingled_data = [[x_1, x_2, ..., x_{P}],
[x_2], ---> [x_2, x_3, ..., x_{P+1}],
... ...
[x_N]] [x_{N-P}, ..., x_{N}]]
```
```
df.head()
import numpy as np
# made a minor correction. increased size by 1 as the original code was missing last shingle
def shingle(data, shingle_size):
    """Convert a 1-D series into overlapping windows ("shingles").

    Row ``n`` of the result is ``data[n:n+shingle_size]``, so a series of
    length N yields ``N - shingle_size + 1`` rows (the final window is
    included, matching the "+1" correction noted above).

    Parameters
    ----------
    data : 1-D sequence of numbers
    shingle_size : int
        Window length (must be > 0).

    Returns
    -------
    numpy.ndarray of shape (max(len(data) - shingle_size + 1, 0), shingle_size)
    """
    series = np.asarray(data, dtype=float)
    num_shingles = len(series) - shingle_size + 1
    # Robustness: a window longer than the series now yields an empty result
    # instead of np.zeros raising on a negative dimension.
    if num_shingles <= 0:
        return np.zeros((0, shingle_size))
    shingled_data = np.zeros((num_shingles, shingle_size))
    for n in range(num_shingles):
        shingled_data[n] = series[n:n + shingle_size]
    return shingled_data
# single data with shingle size=12 (1 year - 12 months)
# let's try one year auto sales
# let's try 1 year window
shingle_size = 12
prefix_shingled = 'sagemaker/randomcutforest_shingled_1year'
auto_data_shingled = shingle(df.values[:,1], shingle_size)
job_name = 'rcf-autosales-shingled-1year'
checkpoint_s3_uri = None
if use_spot_instances:
checkpoint_s3_uri = f's3://{bucket}/{prefix_shingled}/checkpoints/{job_name}'
print (f'Checkpoint uri: {checkpoint_s3_uri}')
df.values[:24,1]
shingle(df.values[:24,1],12)
auto_data_shingled[:5]
auto_data_shingled[-5:]
auto_data_shingled.shape
```
We create a new training job and an inference endpoint. (Note that we cannot re-use the endpoint created above because it was trained with one-dimensional data.)
```
# SDK 2.0
session = sagemaker.Session()
# specify general training job information
rcf = RandomCutForest(role=execution_role,
instance_count=1,
instance_type='ml.m5.xlarge',
data_location='s3://{}/{}/'.format(bucket, prefix_shingled),
output_path='s3://{}/{}/output'.format(bucket, prefix_shingled),
num_samples_per_tree=48,
num_trees=50,
base_job_name = job_name,
use_spot_instances=use_spot_instances,
max_run=max_run,
max_wait=max_wait,
checkpoint_s3_uri=checkpoint_s3_uri)
# automatically upload the training data to S3 and run the training job
rcf.fit(rcf.record_set(auto_data_shingled))
rcf.hyperparameters()
# SDK 2.0 serializers
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import JSONDeserializer
rcf_inference = rcf.deploy(
initial_instance_count=1,
instance_type='ml.m5.xlarge',
endpoint_name = job_name
)
rcf_inference.serializer = CSVSerializer()
rcf_inference.deserializer = JSONDeserializer()
```
Using the above inference endpoint we compute the anomaly scores associated with the shingled data.
```
# Score the shingled datapoints
results = rcf_inference.predict(auto_data_shingled)
scores = np.array([datum['score'] for datum in results['scores']])
# Save the scores
np.savetxt("scores_shingle_annual.csv",
np.asarray(scores),
delimiter=",",
fmt='%10.5f')
# compute the shingled score distribution and cutoff and determine anomalous scores
score_mean = scores.mean()
score_std = scores.std()
score_cutoff = score_mean + 1.5*score_std
anomalies = scores[scores > score_cutoff]
anomaly_indices = np.arange(len(scores))[scores > score_cutoff]
print(anomalies)
score_mean, score_std, score_cutoff
anomalies.size
```
Finally, we plot the scores from the shingled data on top of the original dataset and mark the score lying above the anomaly score threshold.
```
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
#
# *Try this out* - change `start` and `end` to zoom in on the
# anomaly found earlier in this notebook
#
start, end = 0, len(df)
taxi_data_subset = df[start:end]
ax1.plot(df['value'], color='C0', alpha=0.8)
ax2.plot(scores, color='C1')
ax2.scatter(anomaly_indices, anomalies, color='k')
ax1.grid(which='major', axis='both')
ax1.set_ylabel('Auto Sales', color='C0')
ax2.set_ylabel('Anomaly Score', color='C1')
ax1.tick_params('y', colors='C0')
ax2.tick_params('y', colors='C1')
ax2.set_ylim(min(scores), 1.4*max(scores))
fig.set_figwidth(10)
```
We see that with this particular shingle size, hyperparameter selection, and anomaly cutoff threshold, the shingled approach more clearly captures the major anomalous events: the sales spike in early 2012 and the collapse in April 2020. In general, the number of trees, sample size, and anomaly score cutoff are all parameters that a data scientist may need to experiment with in order to achieve desired results. The use of a labeled test dataset allows the user to obtain common accuracy metrics for anomaly detection algorithms. For more information about Amazon SageMaker Random Cut Forest see the [AWS Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/randomcutforest.html).
```
# compute the shingled score distribution and cutoff and determine anomalous scores
score_mean = scores.mean()
score_std = scores.std()
score_cutoff = score_mean + 2.0*score_std
anomalies = scores[scores > score_cutoff]
anomaly_indices = np.arange(len(scores))[scores > score_cutoff]
print(anomalies)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
#
# *Try this out* - change `start` and `end` to zoom in on the
# anomaly found earlier in this notebook
#
start, end = 0, len(df)
taxi_data_subset = df[start:end]
ax1.plot(df['value'], color='C0', alpha=0.8)
ax2.plot(scores, color='C1')
ax2.scatter(anomaly_indices, anomalies, color='k')
ax1.grid(which='major', axis='both')
ax1.set_ylabel('Auto Sales', color='C0')
ax2.set_ylabel('Anomaly Score', color='C1')
ax1.tick_params('y', colors='C0')
ax2.tick_params('y', colors='C1')
ax2.set_ylim(min(scores), 1.4*max(scores))
fig.set_figwidth(10)
# SDK 2.0
rcf_inference.delete_endpoint()
```
| github_jupyter |
```
# Following imports pylab notebook without giving the user rubbish messages
import os, sys
stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
%pylab notebook
sys.stdout = stdout
from scipy.optimize import differential_evolution, minimize
import matplotlib.lines as mlines
from matplotlib.legend_handler import HandlerLine2D
from scipy.misc import imread
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
import ipywidgets as widgets
from IPython.display import display, Markdown
matplotlib.rcParams['figure.subplot.left'] = 0
#matplotlib.rcParams['figure.figsize'] = (7, 6)
class Sandpit:
    """Interactive matplotlib game: probe a sandpit's depth to find a buried phone.

    The pit is the square [0, 6] x [0, 6]. The supplied function ``f(x, y)``
    describes the sand depth; its global minimum (located numerically in
    ``__init__``) is where the phone is hidden. Each click places a numbered
    dip-stick reporting the depth there and, depending on ``game_mode``, a
    negative-gradient arrow. Clicking within ``tol`` of the global minimum
    reveals the contours and wins; landing on a different local minimum digs
    up nothing.

    NOTE(review): ``self.next_step`` (used in game_mode 2) is not defined in
    this class — it is presumably attached externally (e.g. by the notebook
    or a subclass); confirm before relying on that mode.
    """
    def __init__(self, f):
        # Default options
        self.game_mode = 0 # 0 - Jacobian, 1 - Depth Only, 2 - Steepest Descent
        self.grad_length = 1/5          # scale factor for gradient arrows
        self.grad_max_length = 1        # cap on drawn arrow length
        self.arrowhead_width = 0.1
        self.arrow_placement = 2 # 0 - tip, 1 - base, 2 - centre, 3 - tail
        self.tol = 0.15 # Tolerance
        self.markerColour = (1, 0.85, 0)
        # Custom colormap used when the depth contours are revealed
        self.contourCM = LinearSegmentedColormap.from_list("Cmap", [
            (0., 0.00505074, 0.191104),
            (0.155556, 0.0777596, 0.166931),
            (0.311111, 0.150468, 0.142758),
            (0.466667, 0.223177, 0.118585),
            (0.622222, 0.295886, 0.094412),
            (0.777778, 0.368595, 0.070239),
            (0.822222, 0.389369, 0.0633324),
            (0.866667, 0.410143, 0.0564258),
            (0.911111, 0.430917, 0.0495193),
            (0.955556, 0.451691, 0.0426127),
            (1., 0.472465, 0.0357061)
        ], N=256)
        self.start_text = "**Click anywhere in the sandpit to place the dip-stick.**"
        self.win_text = "### Congratulations!\nWell done, you found the phone."
        # Initialisation variables
        self.revealed = False        # True once contours/phone are shown
        self.handler_map = {}        # legend handlers for guess markers
        self.nGuess = 0              # number of manual guesses so far
        self.msgbox = widgets.Output()
        # Parameters
        self.f = f # Contour function
        # Locate the global minimum (phone position, x0) and global maximum
        # (x1) of f numerically over the 6x6 domain.
        x0 = self.x0 = differential_evolution(lambda xs: f(xs[0], xs[1]), ((0,6),(0,6))).x
        x1 = differential_evolution(lambda xs: -f(xs[0], xs[1]), ((0,6),(0,6))).x
        f0 = f(x0[0], x0[1])
        f1 = f(x1[0], x1[1])
        # Rescale depths so self.f spans [-9, -1]: -1 at the shallowest point,
        # -9 at the phone.
        self.f = lambda x, y: 8 * (f(x, y) - f1) / (f1 - f0) - 1
        # Central finite-difference gradient of self.f (step 0.01) ...
        self.df = lambda x, y: np.array([self.f(x+0.01,y)-self.f(x-0.01,y), self.f(x,y+0.01)-self.f(x,y-0.01)]) / 0.02
        # ... and finite-difference Hessian built from self.df.
        self.d2f = lambda x, y: np.array([
            [ self.df(x+0.01,y)[0]-self.df(x-0.01,y)[0], self.df(x,y+0.01)[0]-self.df(x,y-0.01)[0] ],
            [ self.df(x+0.01,y)[1]-self.df(x-0.01,y)[1], self.df(x,y+0.01)[1]-self.df(x,y-0.01)[1] ]
        ]) / 0.02
    def draw(self):
        """Create the sandpit figure, connect mouse handlers, show the message box."""
        self.fig, self.ax = plt.subplots()
        self.ax.set_xlim([0,6])
        self.ax.set_ylim([0,6])
        self.ax.set_aspect(1)
        self.fig.canvas.mpl_connect('button_press_event', lambda e: self.onclick(e))
        self.drawcid = self.fig.canvas.mpl_connect('draw_event', lambda e: self.ondraw(e))
        self.leg = self.ax.legend(handles=[] , bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Depths:")
        img = imread("readonly/sand.png")
        self.ax.imshow(img,zorder=0, extent=[0, 6, 0, 6], interpolation="bilinear")
        display(self.msgbox)
    def onclick(self, event):
        """Handle a left-click: place a dip-stick, then check win / local-min.

        In game_mode 2, up to 5 automatic descent steps are then taken using
        ``self.next_step`` (supplied externally), stopping at the boundary,
        the phone, or a local minimum.
        """
        if (event.button != 1):
            return
        x = event.xdata
        y = event.ydata
        self.placeArrow(x, y)
        # Win if the click lands within tol of the global minimum
        if np.linalg.norm(self.x0 - [x,y]) <= self.tol:
            self.showContours()
            return
        # If the click is (within tol) at the local minimum it would descend
        # to, the player is stuck — show the "no phone" outcome.
        lx = minimize(lambda xs: self.f(xs[0], xs[1]), np.array([x,y])).x
        if np.linalg.norm(lx - [x,y]) <= self.tol:
            self.local_min(lx[0], lx[1])
            return
        i = 5
        if self.game_mode == 2:
            while i > 0 :
                i = i - 1
                dx = self.next_step(self.f(x, y), self.df(x, y), self.d2f(x, y))
                self.ax.plot([x, x+dx[0]],[y, y+dx[1]], '-', zorder=15, color=(1,0,0,0.5), ms=6)
                x += dx[0]
                y += dx[1]
                if x < 0 or x > 6 or y < 0 or y > 6 :
                    break
                self.placeArrow(x, y, auto=True)
                if np.linalg.norm(self.x0 - [x,y]) <= self.tol:
                    self.showContours()
                    break
                lx = minimize(lambda xs: self.f(xs[0], xs[1]), np.array([x,y])).x
                if np.linalg.norm(lx - [x,y]) <= self.tol:
                    self.local_min(lx[0], lx[1])
                    break
    def ondraw(self, event):
        """On the very first draw only: show the intro message, then detach."""
        self.fig.canvas.mpl_disconnect(self.drawcid) # Only do this once, then self destruct the event.
        self.displayMsg(self.start_text)
    def placeArrow(self, x, y, auto=False):
        """Drop a marker at (x, y) and (unless game_mode == 1) a gradient arrow.

        Manual guesses (auto=False) are numbered and added to the legend with
        their depth; after guess 22 the player is hurried, past 25 the game
        reveals the contours and ends. Auto steps are drawn as red dots.
        """
        d = -self.df(x,y) * self.grad_length
        dhat = d / np.linalg.norm(d)
        # Clip arrow length to grad_max_length while keeping its direction
        d = d * np.clip(np.linalg.norm(d), 0, self.grad_max_length) / np.linalg.norm(d)
        if self.arrow_placement == 0: # tip
            off = d + dhat * 1.5 * self.arrowhead_width
        elif self.arrow_placement == 1: # head
            off = d
        elif self.arrow_placement == 2: # centre
            off = d / 2
        else: # tail
            off = array((0, 0)) # NOTE: relies on %pylab's global `array`
        if auto:
            self.ax.plot([x],[y], 'yo', zorder=25, color="red", ms=6)
        else:
            self.nGuess += 1
            p, = self.ax.plot([x],[y], 'yo', zorder=25, label=
                str(self.nGuess) + ") %.2fm" % self.f(x,y), color=self.markerColour, ms=8, markeredgecolor="black")
            if (self.nGuess <= 25) :
                self.ax.text(x + 0.2*dhat[1], y - 0.2*dhat[0], str(self.nGuess))
                self.handler_map[p] = HandlerLine2D(numpoints=1)
                self.leg = self.ax.legend(handler_map=self.handler_map,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title="Depths:")
                if (self.nGuess == 22 and not self.revealed) :
                    self.displayMsg("**Hurry Up!** The supervisor has calls to make.")
            elif not self.revealed:
                self.showContours()
                self.displayMsg("**Try again.** You've taken too many tries to find the phone. Reload the sandpit and try again.")
        if self.game_mode != 1:
            self.ax.arrow(x-off[0],y-off[1], d[0], d[1],
                linewidth=1.5, head_width=self.arrowhead_width,
                head_starts_at_zero=False, zorder=20, color="black")
    def showContours(self):
        """Reveal the depth contours and the phone image; mark the game as won."""
        if self.revealed:
            return
        x0 = self.x0
        X, Y = np.meshgrid(np.arange(0,6,0.05), np.arange(0,6,0.05))
        self.ax.contour(X, Y, self.f(X,Y),10, cmap=self.contourCM)
        img = imread("readonly/phone2.png")
        self.ax.imshow(img,zorder=30, extent=[x0[0] - 0.375/2, x0[0] + 0.375/2, x0[1] - 0.375/2, x0[1] + 0.375/2], interpolation="bilinear")
        self.displayMsg(self.win_text)
        self.revealed = True
    def local_min(self, x, y) :
        """Show the 'no phone' image at a local minimum and warn the player."""
        img = imread("readonly/nophone.png")
        self.ax.imshow(img,zorder=30, extent=[x - 0.375/2, x + 0.375/2, y - 0.375/2, y + 0.375/2], interpolation="bilinear")
        if not self.revealed:
            self.displayMsg("**Oh no!** You've got stuck in a local optimum. Try somewhere else!")
    def displayMsg(self, msg):
        """Replace the message box contents with rendered Markdown."""
        self.msgbox.clear_output()
        with self.msgbox:
            display(Markdown(msg))
```
| github_jupyter |
```
import glob
import os
import warnings
import geopandas
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors
import pandas
import seaborn
from cartopy import crs as ccrs
from mpl_toolkits.axes_grid1 import make_axes_locatable
# from geopandas/geoseries.py:358, when using geopandas.clip:
#
# UserWarning: GeoSeries.notna() previously returned False for both missing (None) and empty geometries.
# Now, it only returns False for missing values. Since the calling GeoSeries contains empty geometries,
# the result has changed compared to previous versions of GeoPandas.
#
# Given a GeoSeries 's', you can use '~s.is_empty & s.notna()' to get back the old behaviour.
#
# To further ignore this warning, you can do:
warnings.filterwarnings('ignore', 'GeoSeries.notna', UserWarning)
# default to larger figures
plt.rcParams['figure.figsize'] = 10, 10
```
# Postprocessing and plotting EEH analysis
Scenarios
- [x] Colour coded map showing the percentage changes in EEH population by LAD
- [x] Total EEH population compared with ONS projection
- [x] Total housing growth per LAD, 2015-2020, 2020-2030, 2030-2040, 2040-2050 (may be better as cumulative chart with LADs)
Pathways
- [x] Proportion of engine types for each Pathway 2015-2050
- [x] Annual CO2 emission * 5 Pathways 2015, 2020, 2030, 2040, 2050
- [x] Colour coded map showing Vehicle km in 2050 for each LAD * 5 Pathways
- [x] Annual electricity consumption for car trips * 5 Pathways, 2015, 2020, 2030, 2040, 2050
- [x] Congestion/capacity utilisation in 2050 for each LAD * 5 Pathways (map/chart)
```
all_zones = geopandas.read_file('../preparation/Local_Authority_Districts__December_2019__Boundaries_UK_BUC-shp/Local_Authority_Districts__December_2019__Boundaries_UK_BUC.shp')
zone_codes = pandas.read_csv('lads-codes-eeh.csv').lad19cd
eeh_zones = all_zones \
[all_zones.lad19cd.isin(zone_codes)] \
[['lad19cd', 'lad19nm', 'st_areasha', 'geometry']]
eeh_zones.plot()
scenarios = [os.path.basename(d) for d in sorted(glob.glob('eeh/0*'))]
scenarios
timesteps = [os.path.basename(d) for d in sorted(glob.glob('eeh/01-BaU/*'))]
timesteps
```
## Population scenario
```
def read_pop(fname):
    """Load a population projection CSV and compute 2015->2050 change per LAD.

    The input has a 'year' column plus one column per LAD code. Only the EEH
    LADs (module-level ``zone_codes``) are kept. Returns a frame indexed by
    lad19cd with columns pop2015, pop2050 and perc_change (in percent).
    """
    raw = pandas.read_csv(fname)
    # keep only the two comparison years and reshape to long form
    long_form = raw[raw.year.isin([2015, 2050])] \
        .melt(id_vars='year', var_name='lad19cd', value_name='population')
    # restrict to EEH LADs and pivot back to one row per LAD
    wide = long_form[long_form.lad19cd.isin(zone_codes)] \
        .pivot(index='lad19cd', columns='year')
    wide.columns = ['pop2015', 'pop2050']
    wide['perc_change'] = (wide.pop2050 - wide.pop2015) / wide.pop2015
    wide.perc_change *= 100
    return wide
eehpop = read_pop('../preparation/data/csvfiles/eehPopulation.csv')
arcpop = read_pop('../preparation/data/csvfiles/eehArcPopulationBaseline.csv')
eehpop.sort_values(by='perc_change').tail()
def plot_pop(eeh_zones, pop):
    """Choropleth of percentage population change 2015-2050 per LAD.

    Joins the population table onto the zone geometries by lad19cd and
    colours zones with a diverging map clipped at +/-95%. Returns the figure.
    """
    merged = eeh_zones.merge(pop, on='lad19cd', validate='one_to_one')
    fig, ax = plt.subplots(1, 1)
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_visible(False)
    # side colourbar axis, aligned with the map
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
    merged.plot(column='perc_change', ax=ax, legend=True, cax=cax,
                cmap='coolwarm_r', vmax=95, vmin=-95)
    cax.yaxis.set_label_text('Population (% change 2015-2050)')
    cax.yaxis.get_label().set_visible(True)
    return fig
eehpop.to_csv('eehPopulationChange.csv')
fig = plot_pop(eeh_zones, eehpop)
plt.savefig("eehPopulationChange.png")
plt.savefig("eehPopulationChange.svg")
fig = plot_pop(eeh_zones, arcpop)
plt.savefig("snppPopulationChange.png")
plt.savefig("snppPopulationChange.svg")
```
## Results
```
def read_result(fname, scenarios, timesteps):
    """Read one model output file for every scenario/timestep combination.

    Loads ``eeh/<scenario>/<timestep>/<fname>`` for each pair, tags each
    frame with its 'year' (timestep) and 'scenario', and concatenates them.
    CSV files are read with pandas; .shp/.gpkg/.geojson with geopandas;
    any other extension raises.
    """
    _, ext = os.path.splitext(fname)  # extension is the same for every pair
    frames = []
    for scenario in scenarios:
        for timestep in timesteps:
            path = os.path.join('eeh', scenario, timestep, fname)
            if ext == '.csv':
                frame = pandas.read_csv(path)
            elif ext in ('.shp', '.gpkg', '.geojson'):
                frame = geopandas.read_file(path)
            else:
                raise Exception(f"Don't know how to read files of type '{ext}'")
            frame['year'] = timestep
            frame['scenario'] = scenario
            frames.append(frame)
    return pandas.concat(frames)
```
## CO2 Emissions
```
zone_vehicle_emissions = read_result('totalCO2EmissionsZonalPerVehicleType.csv', scenarios, timesteps)
zone_vehicle_emissions.head(2)
annual_eeh_emissions = zone_vehicle_emissions[zone_vehicle_emissions.zone.isin(zone_codes)] \
.groupby(['scenario', 'year']) \
.sum()
annual_eeh_emissions['TOTAL'] = annual_eeh_emissions.sum(axis=1)
annual_eeh_emissions.to_csv('eehCO2Emissions.csv')
annual_eeh_emissions.head(10)
```
## Vehicle km per LAD
```
vkm_a = read_result('vehicleKilometresWithAccessEgress.csv', scenarios, timesteps)
eeh_vkm_a = vkm_a[vkm_a.zone.isin(zone_codes)] \
.set_index(['scenario', 'year', 'zone'])
eeh_vkm_a['TOTAL'] = eeh_vkm_a.sum(axis=1)
eeh_vkm_a.to_csv('eehVehicleKilometresWithAccessEgress.csv')
eeh_vkm_a.head()
vkm = read_result('vehicleKilometres.csv', scenarios, timesteps)
eeh_vkm = vkm[vkm.zone.isin(zone_codes)] \
.set_index(['scenario', 'year', 'zone'])
eeh_vkm['TOTAL'] = eeh_vkm.sum(axis=1)
eeh_vkm.to_csv('eehVehicleKilometres.csv')
eeh_vkm.head()
eeh_vkm.describe()
df = eeh_vkm.reset_index().drop(columns='zone').groupby(['scenario', 'year']).sum()[['TOTAL']].reset_index()
seaborn.catplot(
x = "year",
y = "TOTAL",
hue = "scenario",
data = df,
kind = "bar")
def plot_vkm(eeh_zones, eeh_vkm, scenario, year):
    """Map total vehicle kilometres per LAD for one scenario/year.

    The colour ceiling is the global TOTAL maximum across all scenarios and
    years, so maps produced for different runs are directly comparable.
    Returns the figure.
    """
    colour_ceiling = eeh_vkm.TOTAL.max()
    totals = eeh_vkm[['TOTAL']].reset_index() \
        .rename(columns={'TOTAL': 'vkm'})
    selected = totals[(totals.scenario == scenario) & (totals.year == year)] \
        .drop(columns=['scenario', 'year'])
    gdf = geopandas.GeoDataFrame(
        selected.merge(eeh_zones, left_on='zone', right_on='lad19cd', validate='one_to_one'))
    fig, ax = plt.subplots(1, 1)
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_visible(False)
    # side colourbar axis, aligned with the map
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
    gdf.plot(column='vkm', ax=ax, legend=True, cax=cax, cmap='inferno', vmax=colour_ceiling)
    cax.yaxis.set_label_text('Vehicle kilometres (km)')
    cax.yaxis.get_label().set_visible(True)
    return fig
fig = plot_vkm(eeh_zones, eeh_vkm, scenarios[0], "2015")
plt.savefig("eehVehicleKilometres2015.png")
plt.savefig("eehVehicleKilometres2015.svg")
for s in scenarios:
fig = plot_vkm(eeh_zones, eeh_vkm, s, "2050")
plt.savefig(f"eehVehicleKilometres2050_{s}.png")
plt.savefig(f"eehVehicleKilometres2050_{s}.svg")
```
## Electricity consumption for car trips
```
car_elec = read_result('zonalTemporalElectricityCAR.csv', scenarios, timesteps)
car_elec = car_elec[car_elec.zone.isin(zone_codes)] \
.set_index(['scenario', 'year', 'zone'])
car_elec['TOTAL'] = car_elec.sum(axis=1)
car_elec.to_csv('eehZonalTemporalElectricityCAR.csv')
car_elec.head(2)
car_energy = read_result('energyConsumptionsZonalCar.csv', scenarios, timesteps)
car_energy = car_energy[car_energy.zone.isin(zone_codes)] \
.set_index(['scenario', 'year', 'zone'])
car_energy.to_csv('eehEnergyConsumptionsZonalCar.csv')
car_energy.head(2)
```
## Congestion/capacity utilisation
```
zb = eeh_zones.bounds
extent = (zb.minx.min(), zb.maxx.max(), zb.miny.min(), zb.maxy.max())
extent
network_base = read_result('outputNetwork.shp', [scenarios[0]], ["2015"])
eeh_nb = network_base.cx[extent[0]:extent[1], extent[2]:extent[3]].copy()
eeh_nbc = geopandas.clip(eeh_nb, eeh_zones)
eeh_nb.head(1)
eeh_nb.drop(columns=['SRefE','SRefN','IsFerry', 'iDir', 'Anode', 'Bnode', 'CP', 'year', 'CapUtil', 'scenario']).to_file('eehNetwork.gpkg', driver='GPKG')
def plot_cap(zones, network, network_clipped):
    """Map link capacity utilisation over the LAD outlines.

    The full network is drawn in light grey as a backdrop; the clipped links
    are coloured by 'CapUtil' with the scale capped at 200%. Returns the figure.
    """
    fig, ax = plt.subplots(1, 1)
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_visible(False)
    # side colourbar axis, aligned with the map
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
    zones.plot(ax=ax, color='#eeeeee', edgecolor='white')
    network.plot(ax=ax, color='#eeeeee')
    network_clipped.plot(column='CapUtil', ax=ax, legend=True, cax=cax, cmap='inferno', vmax=200)
    cax.yaxis.set_label_text('Capacity Utilisation (%)')
    cax.yaxis.get_label().set_visible(True)
    return fig
fig = plot_cap(eeh_zones, eeh_nb, eeh_nbc)
plt.savefig('eehCapacity2015.png')
plt.savefig('eehCapacity2015.svg')
for s in scenarios:
network = read_result('outputNetwork.shp', [s], ["2050"])
eeh_nb = network.cx[extent[0]:extent[1], extent[2]:extent[3]].copy()
eeh_nbc = geopandas.clip(eeh_nb, eeh_zones)
fig = plot_cap(eeh_zones, eeh_nb, eeh_nbc)
plt.savefig(f'eehCapacity2050_{s}.png')
plt.savefig(f'eehCapacity2050_{s}.svg')
dfs = []
df = read_result('outputNetwork.shp', [scenarios[0]], ["2015"])
df = geopandas.clip(df, eeh_zones) \
[['EdgeID', 'Anode', 'Bnode', 'CP', 'RoadNumber', 'iDir', 'SRefE',
'SRefN', 'Distance', 'FFspeed', 'FFtime', 'IsFerry', 'Lanes', 'CapUtil',
'year', 'scenario']]
dfs.append(df)
for s in scenarios:
df = read_result('outputNetwork.shp', [s], ["2050"])
df = geopandas.clip(df, eeh_zones) \
[['EdgeID', 'Anode', 'Bnode', 'CP', 'RoadNumber', 'iDir', 'SRefE',
'SRefN', 'Distance', 'FFspeed', 'FFtime', 'IsFerry', 'Lanes', 'CapUtil',
'year', 'scenario']]
dfs.append(df)
link_capacity = pandas.concat(dfs) \
.set_index(['scenario', 'year'])
link_capacity.head(2)
link_to_lad = geopandas.sjoin(eeh_nbc, eeh_zones, how="left", op='intersects') \
[['EdgeID','lad19cd','lad19nm']] \
.drop_duplicates(subset=['EdgeID'])
link_to_lad
link_capacity
link_capacity_with_lad = link_capacity \
.reset_index() \
.merge(link_to_lad, on='EdgeID', how='left') \
.set_index(['scenario', 'year', 'EdgeID'])
link_capacity_with_lad
link_capacity_with_lad.to_csv('eehLinkCapUtil.csv')
mean_cap = link_capacity_with_lad[['CapUtil', 'lad19cd','lad19nm']] \
.reset_index() \
.drop(columns='EdgeID') \
.groupby(['scenario', 'year', 'lad19cd', 'lad19nm']) \
.mean()
mean_cap.to_csv('eehLADAverageCapUtil.csv')
mean_cap
df = mean_cap.reset_index()
print(len(df.scenario.unique()))
print(len(df.year.unique()))
print(len(df.lad19cd.unique()))
print(6 * 37)
```
## Link travel times/speeds
```
link_times = read_result('linkTravelTimes.csv', scenarios, timesteps)
link_times.head(1)
eeh_nbc
eeh_lt = link_times[link_times.edgeID.isin(eeh_nbc.EdgeID)]
eeh_lt.to_csv('eehLinkTravelTimes.csv', index=False)
KM_TO_MILES = 0.6213712
hours = [
'MIDNIGHT', 'ONEAM', 'TWOAM', 'THREEAM', 'FOURAM', 'FIVEAM',
'SIXAM', 'SEVENAM', 'EIGHTAM', 'NINEAM', 'TENAM', 'ELEVENAM',
'NOON', 'ONEPM', 'TWOPM', 'THREEPM', 'FOURPM', 'FIVEPM',
'SIXPM', 'SEVENPM', 'EIGHTPM', 'NINEPM', 'TENPM', 'ELEVENPM'
]
def merge_times_to_network(network_clipped, link_times, hours):
    """Join per-hour link travel times onto the clipped network and convert
    each hourly column from a travel time into a speed in miles per hour.

    network_clipped: clipped network GeoDataFrame (EdgeID, Distance, geometry, ...)
    link_times: travel-time table keyed by edgeID with one column per hour name
    hours: the hourly column names to convert in place
    """
    base = network_clipped \
        .drop(columns=['scenario', 'year']) \
        .rename(columns={'EdgeID': 'edgeID'})
    merged = base.merge(link_times, on="edgeID")
    keep = [
        'edgeID', 'RoadNumber', 'iDir', 'Lanes', 'Distance', 'FFspeed',
        'MIDNIGHT', 'ONEAM', 'TWOAM', 'THREEAM', 'FOURAM', 'FIVEAM',
        'SIXAM', 'SEVENAM', 'EIGHTAM', 'NINEAM', 'TENAM', 'ELEVENAM',
        'NOON', 'ONEPM', 'TWOPM', 'THREEPM', 'FOURPM', 'FIVEPM',
        'SIXPM', 'SEVENPM', 'EIGHTPM', 'NINEPM', 'TENPM', 'ELEVENPM',
        'geometry',
    ]
    merged = merged[keep]
    # speed = distance / time, rescaled back to km/h (*60) then to miles/h
    for name in hours:
        merged[name] = (merged.Distance / merged[name]) * 60 * KM_TO_MILES
    merged.FFspeed *= KM_TO_MILES
    return merged
eeh_ltb = merge_times_to_network(
eeh_nbc,
eeh_lt[(eeh_lt.scenario == '01-BaU') & (eeh_lt.year == "2015")],
hours)
eeh_ltb
eeh_ltb.columns
def plot_speed(zones, network, network_clipped, col, label=None):
    """Map link speeds in column *col* over a light-grey zone/network backdrop.

    Args:
        zones: background zone (Geo)DataFrame.
        network: full network (Geo)DataFrame, drawn as context.
        network_clipped: clipped network (Geo)DataFrame holding the speed
            column *col*; coloured on the 'inferno' scale, clamped to 0-75.
        col: name of the speed column to colour links by.
        label: optional text placed in a box in the upper-left corner.

    Returns:
        The matplotlib Figure (caller is responsible for saving/closing it).
    """
    fig, ax = plt.subplots(1, 1)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    # NOTE(review): make_axes_locatable comes from mpl_toolkits.axes_grid1
    # and is not imported in the code visible here -- confirm it is imported
    # elsewhere in the notebook.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    zones.plot(ax=ax, color='#eeeeee', edgecolor='white')
    network.plot(ax=ax, color='#eeeeee')
    network_clipped.plot(column=col, ax=ax, legend=True, cax=cax, cmap='inferno', vmax=75, vmin=0)
    if label is not None:
        # place a text box in upper left in axes coords
        # (fixed an accidental duplicate assignment `props = props = ...`)
        props = dict(boxstyle='round', facecolor='white', alpha=0.5)
        ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,
                verticalalignment='top', bbox=props)
    # Speeds are converted to miles/h by merge_times_to_network (KM_TO_MILES),
    # so label the colourbar in miles/h (the original label said 'km/h').
    cax.yaxis.set_label_text('Speed (miles/h)')
    cax.yaxis.get_label().set_visible(True)
    return fig
# Morning-peak and free-flow speed maps for the 2015 baseline.
fig = plot_speed(eeh_zones, eeh_nb, eeh_ltb, 'EIGHTAM', "Morning peak")
fname = f"speed2015_peakam.png"
plt.savefig(fname)
plt.close(fig)
fig = plot_speed(eeh_zones, eeh_nb, eeh_ltb, 'FFspeed', "Free flow")
fname = f"speed2015_free.png"
plt.savefig(fname)
plt.close(fig)
# One frame per hour of the day (stitched into an animated GIF below).
for i, hour in enumerate(hours):
    fig = plot_speed(eeh_zones, eeh_nb, eeh_ltb, hour, f"{str(i).zfill(2)}:00")
    fname = f"speed2015_{str(i).zfill(3)}.png"
    print(fname, end=" ")
    plt.savefig(fname)
    plt.close(fig)
```
### Convert to GIF
Requires ImageMagick to be installed; the next line runs in the shell.
```
! convert -delay 20 -loop 0 speed2015_0*.png speed2015.gif
```
### Each scenario peak speeds in 2050
```
# Morning-peak speed map for each scenario in 2050.
for scenario in scenarios:
    ltb = merge_times_to_network(
        eeh_nbc,
        eeh_lt[(eeh_lt.scenario == scenario) & (eeh_lt.year == "2050")],
        hours)
    fig = plot_speed(eeh_zones, eeh_nb, ltb, 'EIGHTAM', "Morning peak")
    fname = f"speed2050_{scenario}_peakam.png"
    print(fname, end=" ")
    plt.savefig(fname)
    plt.close(fig)
```
## Rank links per-scenario for peak speed in 2050
```
# Per-link ratio of free-flow to 08:00 peak values in 2050; the scenario with
# the highest ratio on a link is "best" at peak there.
# NOTE(review): assumes eeh_lt carries a 'freeFlow' column -- confirm upstream.
eeh_flow = eeh_lt[eeh_lt.year == "2050"] \
    [["scenario", "edgeID", "EIGHTAM", "freeFlow"]] \
    .rename(columns={'EIGHTAM': 'peakFlow'})
eeh_flow['flowRatio'] = eeh_flow.freeFlow / eeh_flow.peakFlow
eeh_flow.drop(columns=['peakFlow', 'freeFlow'], inplace=True)
# Pivot to one column per scenario, one row per link.
eeh_flow = eeh_flow.pivot_table(columns='scenario', index='edgeID', values='flowRatio')
eeh_flow.columns.name = None
# Scenario achieving the highest ratio on each link.
eeh_flow['bestScenarioAtPeak'] = eeh_flow.idxmax(axis=1)
eeh_flow.head(2)
eeh_flow.groupby('bestScenarioAtPeak').count()[["01-BaU"]]
# Attach link geometry and attributes for mapping.
eeh_flowg = eeh_nbc \
    [["EdgeID", "RoadNumber", "iDir", "Distance", "Lanes", "geometry"]] \
    .rename(columns={'EdgeID': 'edgeID'}) \
    .merge(
        eeh_flow,
        on="edgeID"
    )
# Short display names for scenarios (longer alternatives kept for reference).
lu = {
    # '01-BaU': '1:Business as Usual',
    # '02-HighlyConnected': '2:Highly Connected',
    # '03-AdaptedFleet': '3:Adapted Fleet',
    # '04-BehavShiftPolicy': '4:Behaviour Shift (policy-led)',
    # '05-BehavShiftResults': '5:Behaviour Shift (results-led)',
    '01-BaU': '01 BaU',
    '02-HighlyConnected': '02 HC',
    '03-AdaptedFleet': '03 AF',
    '04-BehavShiftPolicy': '04 BSp',
    '05-BehavShiftResults': '05 BSr',
}
# Relabel the winning scenario per link using the lookup above.
eeh_flowg.bestScenarioAtPeak = eeh_flowg.bestScenarioAtPeak \
    .apply(lambda s: lu[s])
eeh_flowg.head(1)
# Custom five-colour map: one colour per scenario, in label order.
# NOTE(review): uses `matplotlib.colors` -- confirm `import matplotlib`
# happens elsewhere in the notebook; only pyplot is imported in the
# visible code.
eehcm = matplotlib.colors.ListedColormap(
    [(74/255, 120/255, 199/255),
     (238/255, 131/255, 54/255),
     (170/255, 170/255, 170/255),
     (255/255, 196/255, 0/255),
     (84/255, 130/255, 53/255)],
    name='eeh')
fig, ax = plt.subplots(1, 1)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# Zones and full network as grey backdrop, winning scenario per link on top.
eeh_zones.plot(ax=ax, color='#f2f2f2', edgecolor='white')
eeh_nb.plot(ax=ax, color='#eeeeee')
eeh_flowg.plot(column='bestScenarioAtPeak', ax=ax, legend=True, cmap=eehcm)
plt.savefig("bestScenarioPeakFlowRatio.png")
plt.savefig("bestScenarioPeakFlowRatio.svg")
```
## Link travel times direct
| github_jupyter |
```
# import data handling libraries
import pandas as pd
import numpy as np
# import graphing libraries
import seaborn as sns
import matplotlib.pyplot as plt
# import stats libraries
from scipy.optimize import curve_fit
from scipy.special import factorial
from scipy.stats import poisson, norm, chi2, ttest_ind, ttest_rel
from scipy import stats
from scipy import fft
from scipy.cluster.hierarchy import dendrogram, linkage
import plotly.express as px
# from sklearn.cluster import AgglomerativeClustering
```
Initialisation function
```
# Expected column names for the AMI meter-reading CSV.
columns = [
    'Unique Meter ID',
    'Unix Time Stamp',
    'Date/Time Stamp',
    'Incremental Consumption Value (Gallons)',
    'Reading Value (Gallons)'
]
# NOTE(review): hard-coded local path -- this will only run on the author's
# machine; consider parameterising.
df = pd.read_csv("/Users/derekzheng/Documents/coding/r42/Sample_UtilityX_AMIData.csv",
                 # names=columns,
                 header=None,
                 index_col=False
                 )
# Keep only the first five columns and apply the names above.
df = df.loc[:,[0,1,2,3,4]]
df.columns = columns
df.head()
dataframe = df
```
Converting to datetime module
```
# convert datatype to datetime
def convert_to_datetime(df, time_col):
    """Convert *time_col* to datetime dtype and add periodic time columns.

    Mutates and returns *df*, adding 'dotw' (day of week), 'hour' and
    'doty' (day of year) columns derived from *time_col*.

    Args:
        df: the DataFrame to convert in place.
        time_col: name of the time column, in string format e.g. 'date'.

    Returns:
        The same DataFrame, with *time_col* parsed and the three periodic
        columns added.
    """
    df[time_col] = pd.to_datetime(df[time_col])
    # Derive the periodic columns from *time_col* -- the original hard-coded
    # 'Date/Time Stamp' here, silently ignoring the parameter.
    df['dotw'] = df[time_col].dt.dayofweek
    df['hour'] = df[time_col].dt.hour
    df['doty'] = df[time_col].dt.dayofyear
    return df
convert_to_datetime(df, 'Date/Time Stamp')
def make_timestamps(df, meter_col, date_col):
    """Expand *df* so every meter has a row for every timestamp in the data.

    Missing meter/timestamp combinations get NaN read values, ready for
    interpolation downstream.

    Args:
        df: readings DataFrame (with *date_col* already datetime-parsed).
        meter_col: name of the meter-id column.
        date_col: name of the timestamp column.

    Returns:
        DataFrame sorted by meter then timestamp, one row per
        meter x timestamp pair.  Note: reset_index() keeps the pre-sort
        index as an extra 'index' column.
    """
    meters = df[meter_col].unique() # get all unique meters
    dates = df[date_col].unique() # get all unique datetime points
    # create df with all possible datetime points for each meter
    # set columns for new df
    # (meshgrid + transpose + reshape builds the meter x date cartesian product)
    df_temp = pd.DataFrame(np.array(np.meshgrid(meters, dates)).T.reshape(-1,2))
    df_temp.columns = [meter_col, date_col]
    df_temp[date_col] = pd.to_datetime(df_temp[date_col]) # change datatype
    df_new = df_temp.merge(df, how = 'left') #merge with original dataframe to give NaN read values where data is missing
    df_new = df_new.sort_values([meter_col, date_col])
    df_new = df_new.reset_index()
    del df_temp
    return df_new
def add_periodic_time_columns(df, date_col):
    """Attach day-of-week, hour-of-day and day-of-year columns from *date_col*."""
    stamps = df[date_col].dt
    df['dotw'] = stamps.dayofweek
    df['hour'] = stamps.hour
    df['doty'] = stamps.dayofyear
    return df
def interpolate_missing_reads(df, meter_col, date_col, reads_col, nan_timestamps=True):
    """Fill missing meter reads by order-1 spline interpolation over row order.

    Args:
        df: input readings DataFrame.
        meter_col: name of the meter-id column.
        date_col: name of the timestamp column.
        reads_col: name of the readings column.  Kept for API compatibility;
            interpolation currently applies to all numeric columns.
        nan_timestamps: True when *df* already has a (possibly-NaN) row for
            every meter/timestamp pair; False to build those rows first via
            make_timestamps().

    Returns:
        DataFrame sorted by meter then timestamp, with interior NaNs filled
        by a spline of order 1 (limit_area='inside' leaves leading/trailing
        gaps untouched).
    """
    if not nan_timestamps:
        df_temp = make_timestamps(df, meter_col, date_col)
    else:
        df_temp = df
    df_temp = df_temp.sort_values([meter_col, date_col])
    df_temp = df_temp.reset_index()
    # (Removed a stray no-op statement `df_temp.loc[:, [reads_col]]` here --
    # its result was never assigned.)
    df_interp = df_temp.interpolate(
        method='spline',
        limit_direction='both',
        limit_area='inside',
        order=1
    )
    return df_interp
# Build the fully-expanded meter x timestamp grid, then spot-check one meter.
df1 = make_timestamps(df, 'Unique Meter ID', 'Date/Time Stamp')
df_test = df1.loc[df1['Unique Meter ID'] == 31793811]
# display(df_test.count())
# df_temp = interpolate_missing_reads(df_test, 'Unique Meter ID', 'Date/Time Stamp', 'Reading Value (Gallons)', nan_timestamps=True)
# df_temp.count()
df1.shape
# Rows for one meter where consumption data is actually present.
df1.loc[(df1['Unique Meter ID'] == 23385775)&~(df1['Incremental Consumption Value (Gallons)'].isna())]
dfs = []
df_base = []
i = 1
# print("values: ", df1['Unique Meter ID'].unique())
# s factor for cubic spline -> change it with the :30 OR use interpolate directly not the wrapper
# first and last -> find a linear reg of the data and then you subtract the baseline
# always need to preserve monotonic increasing
#how to make it monotonic increasing with the linear fit taken out
# normalize the data? idk
# Interpolate each meter's series separately and accumulate into df_base.
# NOTE(review): the meters at positions 27 and 28 are skipped by loop index --
# looks like a workaround for problem meters; confirm it is still needed.
for meter in df1['Unique Meter ID'].unique():
    if i == 1:
        # First meter seeds df_base.
        df_base = df1.loc[df1['Unique Meter ID'] == meter]
        df_base = interpolate_missing_reads(df_base, 'Unique Meter ID', 'Date/Time Stamp', 'Reading Value (Gallons)', nan_timestamps=True)
        i += 1
    else:
        # if i % 10 == 0:
        if i == 28 or i == 27:
            # print(df_base)
            print("skipped ", meter)
            i += 1
            continue
        print(i, " - ", meter)
        df_temp = df1.loc[df1['Unique Meter ID'] == meter]
        # df_temp = interpolate_missing_reads(df_temp, 'Unique Meter ID', 'Date/Time Stam
        df_temp = interpolate_missing_reads(df_temp, 'Unique Meter ID', 'Date/Time Stamp', 'Reading Value (Gallons)', nan_timestamps=True)
        # Append this meter's rows to a running CSV as a checkpoint.
        df_temp.to_csv('output.csv', mode='a')
        df_base = pd.concat([df_base, df_temp])
        del df_temp
        print(" - ")
        i += 1
df_base.to_csv('Sample_Interpolation_Trial1_2020-09-.csv')
# Yearly cycle
# raw_df.groupby(['doty']).\
# agg({'Incremental Consumption Value (Gallons)':'mean'}).plot()
# Mean consumption by day-of-year across all interpolated meters
# (seaborn aggregates across meters per doty value).
fig, ax = plt.subplots(1, figsize=(12,8))
sns.lineplot(
    x='doty',
    y='Incremental Consumption Value (Gallons)',
    data=df_base
)
# plt.ylabel('')
plt.xlabel('Day of the Year')
plt.title('Filled missing values for n=90 meters')
# # plt.ylim(0,130)
```
| github_jupyter |
<a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/qo20b88v1hbjztubt06609ovs85q8fau.png" width="400px" align="center"></a>
<h1 align="center"><font size="5">LOGISTIC REGRESSION WITH TENSORFLOW</font></h1>
## Table of Contents
Logistic Regression is one of the most important techniques in data science. It is usually used to solve the classic classification problem.
<div class="alert alert-block alert-info" style="margin-top: 20px">
<font size = 3><strong>This lesson covers the following concepts of Logistics Regression:</strong></font>
<br>
<h2>Table of Contents</h2>
<ol>
<li><a href="#ref1">Linear Regression vs Logistic Regression</a></li>
<li><a href="#ref2">Utilizing Logistic Regression in TensorFlow</a></li>
<li><a href="#ref3">Training</a></li>
</ol>
</div>
<p></p>
<br>
<hr>
<a id="ref1"></a>
<h2>What is different between Linear and Logistic Regression?</h2>
While Linear Regression is suited for estimating continuous values (e.g. estimating house price), it is not the best tool for predicting the class in which an observed data point belongs. In order to provide an estimate for classification, we need some sort of guidance on what would be the <b>most probable class</b> for that data point. For this, we use <b>Logistic Regression</b>.
<div class="alert alert-success alertsuccess" style="margin-top: 20px">
<font size="3"><strong>Recall linear regression:</strong></font>
<br>
<br>
Linear regression finds a function that relates a continuous dependent variable, <i>y</i>, to some predictors (independent variables <i>x1</i>, <i>x2</i>, etc.). Simple linear regression assumes a function of the form:
<br><br>
$$
y = w0 + w1 \times x1 + w2 \times x2 + \cdots
$$
<br>
and finds the values of <i>w0</i>, <i>w1</i>, <i>w2</i>, etc. The term <i>w0</i> is the "intercept" or "constant term" (it's shown as <i>b</i> in the formula below):
<br><br>
$$
Y = W X + b
$$
<p></p>
</div>
Logistic Regression is a variation of Linear Regression, useful when the observed dependent variable, <i>y</i>, is categorical. It produces a formula that predicts the probability of the class label as a function of the independent variables.
Despite the name logistic <i>regression</i>, it is actually a <b>probabilistic classification</b> model. Logistic regression fits a special s-shaped curve by taking the linear regression and transforming the numeric estimate into a probability with the following function:
$$
ProbabilityOfaClass = \theta(y) = \frac{e^y}{1 + e^y} = exp(y) / (1 + exp(y)) = p
$$
which produces p-values between 0 (as y approaches minus infinity $-\infty$) and 1 (as y approaches plus infinity $+\infty$). This now becomes a special kind of non-linear regression.
In this equation, <i>y</i> is the regression result (the sum of the variables weighted by the coefficients), <code>exp</code> is the exponential function and $\theta(y)$ is the <a href="http://en.wikipedia.org/wiki/Logistic_function">logistic function</a>, also called logistic curve. It is a common "S" shape (sigmoid curve), and was first developed for modeling population growth.
You might also have seen this function before, in another configuration:
$$
ProbabilityOfaClass = \theta(y) = \frac{1}{1+e^{-y}}
$$
So, briefly, Logistic Regression passes the input through the logistic/sigmoid function but then treats the result as a probability:
<img src="https://ibm.box.com/shared/static/kgv9alcghmjcv97op4d6onkyxevk23b1.png" width="400" align="center">
-------------------------------
<a id="ref2"></a>
<h2>Utilizing Logistic Regression in TensorFlow</h2>
For us to utilize Logistic Regression in TensorFlow, we first need to import the required libraries. To do so, you can run the code cell below.
```
import tensorflow as tf
import pandas as pd
import numpy as np
import time
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
```
Next, we will load the dataset we are going to use. In this case, we are utilizing the <code>iris</code> dataset, which is inbuilt -- so there's no need to do any preprocessing and we can jump right into manipulating it. We separate the dataset into <i>xs</i> and <i>ys</i>, and then into training <i>xs</i> and <i>ys</i> and testing <i>xs</i> and <i>ys</i>, (pseudo)randomly.
<h3>Understanding the Data</h3>
<h4><code>Iris Dataset</code>:</h4>
This dataset was introduced by British Statistician and Biologist Ronald Fisher, it consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). In total it has 150 records under five attributes - petal length, petal width, sepal length, sepal width and species. <a href="https://archive.ics.uci.edu/ml/datasets/iris">Dataset source</a>
Attributes
Independent Variable
<ul>
<li>petal length</li>
<li>petal width</li>
<li>sepal length</li>
<li>sepal width</li>
</ul>
Dependent Variable
<ul>
<li>Species
<ul>
<li>Iris setosa</li>
<li>Iris virginica</li>
<li>Iris versicolor</li>
</ul>
</li>
</ul>
<br>
```
# Load iris; drop the last record (so 149 samples), one-hot encode the
# species labels, and split 67/33 train/test with a fixed seed.
iris = load_iris()
iris_X, iris_y = iris.data[:-1,:], iris.target[:-1]
iris_y= pd.get_dummies(iris_y).values
trainX, testX, trainY, testY = train_test_split(iris_X, iris_y, test_size=0.33, random_state=42)
```
Now we define x and y. These placeholders will hold our iris data (both the features and label matrices), and help pass them along to different parts of the algorithm. You can consider placeholders as empty shells into which we insert our data. We also need to give them shapes which correspond to the shape of our data. Later, we will insert data into these placeholders by “feeding” the placeholders the data via a “feed_dict” (Feed Dictionary).
<h3>Why use Placeholders?</h3>
<ol>
<li>This feature of TensorFlow allows us to create an algorithm which accepts data and knows something about the shape of the data without knowing the amount of data going in.</li>
<li>When we insert “batches” of data in training, we can easily adjust how many examples we train on in a single step without changing the entire algorithm.</li>
</ol>
```
# numFeatures is the number of features in our input data.
# In the iris dataset, this number is '4'.
numFeatures = trainX.shape[1]
# numLabels is the number of classes our data points can be in.
# In the iris dataset, this number is '3'.
numLabels = trainY.shape[1]
# Placeholders
# 'None' means TensorFlow shouldn't expect a fixed number in that dimension
# NOTE(review): tf.placeholder is TensorFlow 1.x graph-mode API; this cell
# requires TF1 (or tf.compat.v1 with v2 behaviour disabled).
X = tf.placeholder(tf.float32, [None, numFeatures]) # Iris has 4 features, so X is a tensor to hold our data.
yGold = tf.placeholder(tf.float32, [None, numLabels]) # This will be our correct answers matrix for 3 classes.
```
<h3>Set model weights and bias</h3>
Much like Linear Regression, we need a shared variable weight matrix for Logistic Regression. We initialize both <code>W</code> and <code>b</code> as tensors full of zeros. Since we are going to learn <code>W</code> and <code>b</code>, their initial value does not matter too much. These variables are the objects which define the structure of our regression model, and we can save them after they have been trained so we can reuse them later.
We define two TensorFlow variables as our parameters. These variables will hold the weights and biases of our logistic regression and they will be continually updated during training.
Notice that <code>W</code> has a shape of [4, 3] because we want to multiply the 4-dimensional input vectors by it to produce 3-dimensional vectors of evidence for the difference classes. <code>b</code> has a shape of [3] so we can add it to the output. Moreover, unlike our placeholders above which are essentially empty shells waiting to be fed data, TensorFlow variables need to be initialized with values, e.g. with zeros.
```
# NOTE(review): W and b below are defined but never referenced again in the
# visible code -- the model uses the randomly-initialised `weights` and
# `bias` variables instead.
W = tf.Variable(tf.zeros([4, 3]))  # 4-dimensional input and 3 classes
b = tf.Variable(tf.zeros([3])) # 3-dimensional output [0,0,1],[0,1,0],[1,0,0]
#Randomly sample from a normal distribution with standard deviation .01
weights = tf.Variable(tf.random_normal([numFeatures,numLabels],
                                       mean=0,
                                       stddev=0.01,
                                       name="weights"))
bias = tf.Variable(tf.random_normal([1,numLabels],
                                    mean=0,
                                    stddev=0.01,
                                    name="bias"))
```
<h3>Logistic Regression model</h3>
We now define our operations in order to properly run the Logistic Regression. Logistic regression is typically thought of as a single equation:
$$
ŷ =sigmoid(WX+b)
$$
However, for the sake of clarity, we can have it broken into its three main components:
- a weight times features matrix multiplication operation,
- a summation of the weighted features and a bias term,
- and finally the application of a sigmoid function.
As such, you will find these components defined as three separate operations below.
```
# Three-component breakdown of the Logistic Regression equation.
# Note that these feed into each other.
# 1) weighted features: X @ weights
apply_weights_OP = tf.matmul(X, weights, name="apply_weights")
# 2) add the bias term
add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
# 3) squash through the sigmoid to obtain per-class probabilities
activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
```
As we have seen before, the function we are going to use is the <i>logistic function</i> $(\frac{1}{1+e^{-Wx}})$, which is fed the input data after applying weights and bias. In TensorFlow, this function is implemented as the <code>nn.sigmoid</code> function. Effectively, this fits the weighted input with bias into a 0-100 percent curve, which is the probability function we want.
<hr>
<a id="ref3"></a>
<h2>Training</h2>
The learning algorithm is how we search for the best weight vector (${\bf w}$). This search is an optimization problem looking for the hypothesis that optimizes an error/cost measure.
<b>What tell us our model is bad?</b>
The Cost or Loss of the model, so what we want is to minimize that.
<b>What is the cost function in our model?</b>
The cost function we are going to utilize is the Squared Mean Error loss function.
<b>How to minimize the cost function?</b>
We can't use <b>least-squares linear regression</b> here, so we will use <a href="http://en.wikipedia.org/wiki/Gradient_descent">gradient descent</a> instead. Specifically, we will use batch gradient descent which calculates the gradient from all data points in the data set.
<h3>Cost function</h3>
Before defining our cost function, we need to define how long we are going to train and how should we define the learning rate.
```
# Number of Epochs in our training
numEpochs = 700
# Defining our learning rate iterations (decay)
# NOTE(review): global_step is the fixed constant 1, so the exponential
# decay never advances -- the effective learning rate is constant.
# Confirm this is intended.
learningRate = tf.train.exponential_decay(learning_rate=0.0008,
                                          global_step= 1,
                                          decay_steps=trainX.shape[0],
                                          decay_rate= 0.95,
                                          staircase=True)
#Defining our cost function - Squared Mean Error
cost_OP = tf.nn.l2_loss(activation_OP-yGold, name="squared_error_cost")
#Defining our Gradient Descent
training_OP = tf.train.GradientDescentOptimizer(learningRate).minimize(cost_OP)
```
Now we move on to actually running our operations. We will start with the operations involved in the prediction phase (i.e. the logistic regression itself).
First, we need to initialize our weights and biases with zeros or random values via the inbuilt Initialization Op, <b>tf.global_variables_initializer()</b>. This Initialization Op will become a node in our computational graph, and when we put the graph into a session, then the Op will run and create the variables.
```
# Create a tensorflow session
# (TF1 graph-mode: ops defined above only execute inside this session)
sess = tf.Session()
# Initialize our weights and biases variables.
init_OP = tf.global_variables_initializer()
# Initialize all tensorflow variables
sess.run(init_OP)
```
We also want some additional operations to keep track of our model's efficiency over time. We can do this like so:
```
# argmax(activation_OP, 1) returns the label with the most probability
# argmax(yGold, 1) is the correct label
correct_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))
# If every false prediction is 0 and every true prediction is 1, the average returns us the accuracy
accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# Summary op for regression output
activation_summary_OP = tf.summary.histogram("output", activation_OP)
# Summary op for accuracy
accuracy_summary_OP = tf.summary.scalar("accuracy", accuracy_OP)
# Summary op for cost
cost_summary_OP = tf.summary.scalar("cost", cost_OP)
# Summary ops to check how variables (W, b) are updating after each iteration
# NOTE(review): .eval() snapshots the current (initial) values into numpy
# arrays, so these two histograms record constants rather than live,
# per-iteration values -- confirm this is acceptable for the tutorial.
weightSummary = tf.summary.histogram("weights", weights.eval(session=sess))
biasSummary = tf.summary.histogram("biases", bias.eval(session=sess))
# Merge all summaries
merged = tf.summary.merge([activation_summary_OP, accuracy_summary_OP, cost_summary_OP, weightSummary, biasSummary])
# Summary writer
writer = tf.summary.FileWriter("summary_logs", sess.graph)
```
Now we can define and run the actual training loop, like this:
```
# Initialize reporting variables
cost = 0
diff = 1
epoch_values = []
accuracy_values = []
cost_values = []
# Training epochs
for i in range(numEpochs):
    # Early stop once the change in cost falls below the threshold.
    if i > 1 and diff < .0001:
        print("change in cost %g; convergence."%diff)
        break
    else:
        # Run training step
        step = sess.run(training_OP, feed_dict={X: trainX, yGold: trainY})
        # Report occasional stats
        if i % 10 == 0:
            # Add epoch to epoch_values
            epoch_values.append(i)
            # Generate accuracy stats on test data
            # (note: evaluated on the TRAINING set despite the comment)
            train_accuracy, newCost = sess.run([accuracy_OP, cost_OP], feed_dict={X: trainX, yGold: trainY})
            # Add accuracy to live graphing variable
            accuracy_values.append(train_accuracy)
            # Add cost to live graphing variable
            cost_values.append(newCost)
            # Re-assign values for variables
            diff = abs(newCost - cost)
            cost = newCost
            #generate print statements
            print("step %d, training accuracy %g, cost %g, change in cost %g"%(i, train_accuracy, newCost, diff))
# How well do we perform on held-out test data?
print("final accuracy on test set: %s" %str(sess.run(accuracy_OP,
                                                     feed_dict={X: testX,
                                                                yGold: testY})))
```
<b>Why don't we plot the cost to see how it behaves?</b>
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Rolling mean (window 50) of the recorded costs.
# NOTE(review): for i < 50 the slice cost_values[i-50:i] uses a negative
# start index and can be empty or wrap around, yielding NaN/odd values at
# the start of the plot -- confirm intended.
plt.plot([np.mean(cost_values[i-50:i]) for i in range(len(cost_values))])
plt.show()
```
Assuming no parameters were changed, you should reach a peak accuracy of 90% at the end of training, which is commendable. Try changing the parameters such as the length of training, and maybe some operations to see how the model behaves. Does it take much longer? How is the performance?
<hr>
## Want to learn more?
Running deep learning programs usually needs a high performance platform. __PowerAI__ speeds up deep learning and AI. Built on IBM’s Power Systems, __PowerAI__ is a scalable software platform that accelerates deep learning and AI with blazing performance for individual users or enterprises. The __PowerAI__ platform supports popular machine learning libraries and dependencies including TensorFlow, Caffe, Torch, and Theano. You can use [PowerAI on IBM Cloud](https://cocl.us/ML0120EN_PAI).
Also, you can use __Watson Studio__ to run these notebooks faster with bigger datasets. __Watson Studio__ is IBM’s leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, __Watson Studio__ enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of __Watson Studio__ users today with a free account at [Watson Studio](https://cocl.us/ML0120EN_DSX). This is the end of this lesson. Thank you for reading this notebook, and good luck on your studies.
### Thanks for completing this lesson!
This is the end of **Logistic Regression with TensorFlow** notebook. Hopefully, now you have a deeper understanding of Logistic Regression and how its structure and flow work. Thank you for reading this notebook and good luck on your studies.
Created by: <a href="https://br.linkedin.com/in/walter-gomes-de-amorim-junior-624726121">Saeed Aghabozorgi</a> , <a href="https://br.linkedin.com/in/walter-gomes-de-amorim-junior-624726121">Walter Gomes de Amorim Junior</a> , Victor Barros Costa
<hr>
Copyright © 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| github_jupyter |
```
#@title Copyright 2021 Google LLC. { display-mode: "form" }
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="ee-notebook-buttons" align="left"><td>
<a target="_blank" href="http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_TensorFlow_AI_Platform.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_TensorFlow_AI_Platform.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td></table>
# Introduction
This is an Earth Engine <> TensorFlow demonstration notebook. This demonstrates a per-pixel neural network implemented in a way that allows the trained model to be hosted on [Google AI Platform](https://cloud.google.com/ai-platform) and used in Earth Engine for interactive prediction from an `ee.Model.fromAIPlatformPredictor`. See [this example notebook](http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/TF_demo1_keras.ipynb) for background on the dense model.
**Running this demo may incur charges to your Google Cloud Account!**
# Setup software libraries
Import software libraries and/or authenticate as necessary.
## Authenticate to Colab and Cloud
To read/write from a Google Cloud Storage bucket to which you have access, it's necessary to authenticate (as yourself). *This should be the same account you use to login to Earth Engine*. When you run the code below, it will display a link in the output to an authentication page in your browser. Follow the link to a page that will let you grant permission to the Cloud SDK to access your resources. Copy the code from the permissions page back into this notebook and press return to complete the process.
(You may need to run this again if you get a credentials error later.)
```
# Authenticate this Colab runtime to Google Cloud (interactive OAuth flow).
from google.colab import auth
auth.authenticate_user()
```
## Upgrade Earth Engine and Authenticate
Update Earth Engine to ensure you have the latest version. Authenticate to Earth Engine the same way you did to the Colab notebook. Specifically, run the code to display a link to a permissions page. This gives you access to your Earth Engine account. *This should be the same account you used to login to Cloud previously*. Copy the code from the Earth Engine permissions page back into the notebook and press return to complete the process.
```
# Upgrade the Earth Engine client, then authenticate and initialise it.
!pip install -U earthengine-api --no-deps
import ee
ee.Authenticate()
ee.Initialize()
```
## Test the TensorFlow installation
Import TensorFlow and check the version.
```
# Confirm TensorFlow is installed and report its version.
import tensorflow as tf
print(tf.__version__)
```
## Test the Folium installation
We will use the Folium library for visualization. Import the library and check the version.
```
# Confirm Folium (map visualisation) is installed and report its version.
import folium
print(folium.__version__)
```
# Define variables
The training data are land cover labels with a single vector of Landsat 8 pixel values (`BANDS`) as predictors. See [this example notebook](http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/TF_demo1_keras.ipynb) for details on how to generate these training data.
```
# REPLACE WITH YOUR CLOUD PROJECT!
PROJECT = 'your-project'
# Cloud Storage bucket with training and testing datasets.
DATA_BUCKET = 'ee-docs-demos'
# Output bucket for trained models. You must be able to write into this bucket.
OUTPUT_BUCKET = 'your-bucket'
# This is a good region for hosting AI models.
REGION = 'us-central1'
# Training and testing dataset file names in the Cloud Storage bucket.
TRAIN_FILE_PREFIX = 'Training_demo'
TEST_FILE_PREFIX = 'Testing_demo'
file_extension = '.tfrecord.gz'
TRAIN_FILE_PATH = 'gs://' + DATA_BUCKET + '/' + TRAIN_FILE_PREFIX + file_extension
TEST_FILE_PATH = 'gs://' + DATA_BUCKET + '/' + TEST_FILE_PREFIX + file_extension
# The labels, consecutive integer indices starting from zero, are stored in
# this property, set on each point.
LABEL = 'landcover'
# Number of label values, i.e. number of classes in the classification.
N_CLASSES = 3
# Use Landsat 8 surface reflectance data for predictors.
L8SR = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Use these bands for prediction.
BANDS = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7']
# These names are used to specify properties in the export of
# training/testing data and to define the mapping between names and data
# when reading into TensorFlow datasets.
FEATURE_NAMES = list(BANDS)
FEATURE_NAMES.append(LABEL)
# List of fixed-length features, all of which are float32.
# (One scalar float per band, plus one for the label.)
columns = [
    tf.io.FixedLenFeature(shape=[1], dtype=tf.float32) for k in FEATURE_NAMES
]
# Dictionary with feature names as keys, fixed-length features as values.
FEATURES_DICT = dict(zip(FEATURE_NAMES, columns))
```
# Read data
### Check existence of the data files
Check that you have permission to read the files in the output Cloud Storage bucket.
```
# Verify read access to the training/testing files in Cloud Storage.
print('Found training file.' if tf.io.gfile.exists(TRAIN_FILE_PATH)
      else 'No training file found.')
print('Found testing file.' if tf.io.gfile.exists(TEST_FILE_PATH)
      else 'No testing file found.')
```
## Read into a `tf.data.Dataset`
Here we are going to read a file in Cloud Storage into a `tf.data.Dataset`. ([these TensorFlow docs](https://www.tensorflow.org/guide/data) explain more about reading data into a `tf.data.Dataset`). Check that you can read examples from the file. The purpose here is to ensure that we can read from the file without an error. The actual content is not necessarily human readable. Note that we will use all data for training.
```
# Create a dataset from the TFRecord file in Cloud Storage.
# Note: both the train and test files are read here -- all data is used
# for training, as stated in the prose above.
train_dataset = tf.data.TFRecordDataset([TRAIN_FILE_PATH, TEST_FILE_PATH],
                                        compression_type='GZIP')
# Print the first record to check.
print(iter(train_dataset).next())
```
## Parse the dataset
Now we need to make a parsing function for the data in the TFRecord files. The data comes in flattened 2D arrays per record and we want to use the first part of the array for input to the model and the last element of the array as the class label. The parsing function reads data from a serialized `Example` proto (i.e. [`example.proto`](https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/core/example/example.proto)) into a dictionary in which the keys are the feature names and the values are the tensors storing the value of the features for that example. ([Learn more about parsing `Example` protocol buffer messages](https://www.tensorflow.org/programmers_guide/datasets#parsing_tfexample_protocol_buffer_messages)).
```
def parse_tfrecord(example_proto):
    """Deserialize one serialized `Example` proto into (features, label).

    Args:
      example_proto: a serialized Example.

    Returns:
      A tuple of the predictors dictionary and the LABEL, cast to an `int32`.
    """
    parsed = tf.io.parse_single_example(example_proto, FEATURES_DICT)
    label = parsed.pop(LABEL)
    return parsed, tf.cast(label, tf.int32)
# Map the function over the dataset.
# (num_parallel_calls=4 parses up to four records concurrently)
parsed_dataset = train_dataset.map(parse_tfrecord, num_parallel_calls=4)
from pprint import pprint
# Print the first parsed record to check.
pprint(iter(parsed_dataset).next())
```
Note that each record of the parsed dataset contains a tuple. The first element of the tuple is a dictionary with band names for keys and tensors storing the pixel data for values. The second element of the tuple is a tensor storing the class label.
## Adjust dimension and shape
Turn the dictionary of *{name: tensor,...}* into a 1x1xP array of values, where P is the number of predictors. Turn the label into a 1x1x`N_CLASSES` array of indicators (i.e. a one-hot vector), in order to use a categorical cross-entropy loss function. Return a tuple of (predictors, indicators) where each is a three-dimensional array; the first two dimensions are spatial x, y (i.e. a 1x1 kernel).
```
# Inputs as a tuple. Make predictors 1x1xP and labels 1x1xN_CLASSES.
def to_tuple(inputs, label):
    """Reshape a parsed record for the 1x1-kernel convolutional model.

    The feature dictionary becomes a 1x1xP array of predictor values and
    the label becomes a 1x1xN_CLASSES one-hot indicator array.
    """
    predictors = tf.transpose(list(inputs.values()))
    indicators = tf.one_hot(indices=label, depth=N_CLASSES)
    return tf.expand_dims(predictors, 1), tf.expand_dims(indicators, 1)
# Convert each record to the (predictors, one-hot label) tuple shape.
input_dataset = parsed_dataset.map(to_tuple)
# Check the first record to confirm the shapes.
pprint(iter(input_dataset).next())
# Shuffle with a 128-record buffer and batch 8 records per step.
input_dataset = input_dataset.shuffle(128).batch(8)
```
# Model setup
Make a densely-connected convolutional model, where the convolution occurs in a 1x1 kernel. This is exactly analogous to the model generated in [this example notebook](http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/TF_demo1_keras.ipynb), but operates in a convolutional manner in a 1x1 kernel. This allows Earth Engine to apply the model spatially, as demonstrated below.
Note that the model used here is purely for demonstration purposes and hasn't gone through any performance tuning.
## Create the Keras model
Before we create the model, there's still a wee bit of pre-processing to get the data into the right input shape and a format that can be used with cross-entropy loss. Specifically, Keras expects a list of inputs and a one-hot vector for the class. (See [the Keras loss function docs](https://keras.io/losses/), [the TensorFlow categorical identity docs](https://www.tensorflow.org/guide/feature_columns#categorical_identity_column) and [the `tf.one_hot` docs](https://www.tensorflow.org/api_docs/python/tf/one_hot) for details).
Here we will use a simple neural network model with a 64 node hidden layer. Once the dataset has been prepared, define the model, compile it, fit it to the training data. See [the Keras `Sequential` model guide](https://keras.io/getting-started/sequential-model-guide/) for more details.
```
from tensorflow import keras  # NOTE(review): unused; tf.keras is used below
# Define the layers in the model. The 1x1 convolutions make the network
# exactly equivalent to a per-pixel dense network, which lets Earth
# Engine apply it spatially. Spatial dims are None so any tile size works.
model = tf.keras.models.Sequential([
tf.keras.layers.Input((None, None, len(BANDS),)),
tf.keras.layers.Conv2D(64, (1,1), activation=tf.nn.relu),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(N_CLASSES, (1,1), activation=tf.nn.softmax)
])
# Categorical cross-entropy matches the one-hot labels produced by
# to_tuple(); Adam with default settings as the optimizer.
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Fit the model to the training data. Lucky number 7 (no tuning done).
model.fit(x=input_dataset, epochs=7)
```
## Save the trained model
Export the trained model to TensorFlow `SavedModel` format in your cloud storage bucket. The [Cloud Platform storage browser](https://console.cloud.google.com/storage/browser) is useful for checking on these saved models.
```
# Destination in Cloud Storage; SavedModel format is required for the
# EEification and AI Platform hosting steps below.
MODEL_DIR = 'gs://' + OUTPUT_BUCKET + '/demo_pixel_model'
model.save(MODEL_DIR, save_format='tf')
```
# EEification
EEIfication prepares the model for hosting on [Google AI Platform](https://cloud.google.com/ai-platform). Learn more about EEification from [this doc](https://developers.google.com/earth-engine/tensorflow#interacting-with-models-hosted-on-ai-platform). First, get (and SET) input and output names of the nodes. **CHANGE THE OUTPUT NAME TO SOMETHING THAT MAKES SENSE FOR YOUR MODEL!** Keep the input name of 'array', which is how you'll pass data into the model (as an array image).
```
from tensorflow.python.tools import saved_model_utils
import json

# Read the serving signature of the SavedModel to discover the tensor
# names of its input and output.
meta_graph_def = saved_model_utils.get_meta_graph_def(MODEL_DIR, 'serve')
signature = meta_graph_def.signature_def['serving_default']
inputs = signature.inputs
outputs = signature.outputs

# This model has exactly one input and one output, so take the first
# entry of each signature map. (Replaces the original loop-and-break
# idiom with the direct "first item" expression.)
input_name = next(iter(inputs.values())).name
output_name = next(iter(outputs.values())).name

# Map the model's tensor names to the names Earth Engine will use:
# data is passed in as 'array' and read back as 'output'. The JSON is
# wrapped in single quotes so it survives shell interpolation in the
# `earthengine model prepare` command below.
input_dict = "'" + json.dumps({input_name: "array"}) + "'"
output_dict = "'" + json.dumps({output_name: "output"}) + "'"
print(input_dict)
print(output_dict)
```
## Run the EEifier
The actual EEification is handled by the `earthengine model prepare` command. Note that you will need to set your Cloud Project prior to running the command.
```
# Put the EEified model next to the trained model directory.
EEIFIED_DIR = 'gs://' + OUTPUT_BUCKET + '/eeified_pixel_model'
# You need to set the project before using the model prepare command.
!earthengine set_project {PROJECT}
# Rewrite the SavedModel so its input/output tensors use the Earth
# Engine names declared in input_dict/output_dict above.
!earthengine model prepare --source_dir {MODEL_DIR} --dest_dir {EEIFIED_DIR} --input {input_dict} --output {output_dict}
```
# Deploy and host the EEified model on AI Platform
Now there is another TensorFlow `SavedModel` stored in `EEIFIED_DIR` ready for hosting by AI Platform. Do that from the `gcloud` command line tool, installed in the Colab runtime by default. Be sure to specify a regional model with the `REGION` parameter. Note that the `MODEL_NAME` must be unique. If you already have a model by that name, either name a new model or a new version of the old model. The [Cloud Console AI Platform models page](https://console.cloud.google.com/ai-platform/models) is useful for monitoring your models.
**If you change anything about the trained model, you'll need to re-EEify it and create a new version!**
```
# Names for the hosted model; MODEL_NAME must be unique in the project.
MODEL_NAME = 'pixel_demo_model'
VERSION_NAME = 'v0'
# Create the (regional) model resource on AI Platform.
!gcloud ai-platform models create {MODEL_NAME} \
--project {PROJECT} \
--region {REGION}
# Create a version of the model from the EEified SavedModel directory.
# The runtime/Python versions must match what the model was built with.
!gcloud ai-platform versions create {VERSION_NAME} \
--project {PROJECT} \
--region {REGION} \
--model {MODEL_NAME} \
--origin {EEIFIED_DIR} \
--framework "TENSORFLOW" \
--runtime-version=2.3 \
--python-version=3.7
```
# Connect to the hosted model from Earth Engine
1. Generate the input imagery. This should be done in exactly the same way as the training data were generated. See [this example notebook](http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/TF_demo1_keras.ipynb) for details.
2. Connect to the hosted model.
3. Use the model to make predictions.
4. Display the results.
Note that it takes the model a couple minutes to spin up and make predictions.
```
# Cloud masking function.
def maskL8sr(image):
    """Cloud-mask a Landsat 8 SR image and rescale its reflectance.

    Pixels are kept only when both bit 3 (cloud shadow) and bit 5
    (cloud) of the 'pixel_qa' band are zero. The selected BANDS are
    divided by 10000 to rescale reflectance values.
    """
    qa_band = image.select('pixel_qa')
    shadow_bit = ee.Number(2).pow(3).int()
    cloud_bit = ee.Number(2).pow(5).int()
    clear_of_shadow = qa_band.bitwiseAnd(shadow_bit).eq(0)
    clear_of_cloud = qa_band.bitwiseAnd(cloud_bit).eq(0)
    clear_mask = clear_of_shadow.And(clear_of_cloud)
    return image.updateMask(clear_mask).select(BANDS).divide(10000)
# The image input data is a 2018 cloud-masked median composite, built
# exactly like the training data was.
image = L8SR.filterDate('2018-01-01', '2018-12-31').map(maskL8sr).median()
# Get a map ID for display of the RGB composite in folium.
rgb_vis = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3, 'format': 'png'}
mapid = image.getMapId(rgb_vis)
# Turn into an array image (one array-valued pixel) for input to the model.
array_image = image.float().toArray()
# Point to the model hosted on AI Platform. If you specified a region other
# than the default (us-central1) at model creation, specify it here.
# NOTE(review): this rebinds `model`, replacing the Keras model object
# defined earlier with an ee.Model handle.
model = ee.Model.fromAiPlatformPredictor(
projectName=PROJECT,
modelName=MODEL_NAME,
version=VERSION_NAME,
# Tile size sent per prediction request; can be anything, but don't
# make it too big.
inputTileSize=[8, 8],
# Keep this the same as your training data (30 m, EPSG:4326).
proj=ee.Projection('EPSG:4326').atScale(30),
fixInputProj=True,
# Note the names here need to match what you specified in the
# output dictionary you passed to the EEifier.
outputBands={'output': {
'type': ee.PixelType.float(),
'dimensions': 1
}
},
)
# model.predictImage outputs a one dimensional array image that
# packs the output nodes of your model into an array. These
# are class probabilities that you need to unpack into a
# multiband image with arrayFlatten(). If you want class
# labels, use arrayArgmax() as follows.
predictions = model.predictImage(array_image)
probabilities = predictions.arrayFlatten([['bare', 'veg', 'water']])
label = predictions.arrayArgmax().arrayGet([0]).rename('label')
# Get map IDs for display of probabilities and labels in folium.
probability_vis = {
'bands': ['bare', 'veg', 'water'], 'max': 0.5, 'format': 'png'
}
label_vis = {
'palette': ['red', 'green', 'blue'], 'min': 0, 'max': 2, 'format': 'png'
}
probability_mapid = probabilities.getMapId(probability_vis)
label_mapid = label.getMapId(label_vis)
# Visualize the input imagery and the predictions on an interactive map.
# The map is bound to `folium_map` (not `map`) so it does not shadow the
# Python builtin `map`.
folium_map = folium.Map(location=[37.6413, -122.2582], zoom_start=11)
# Median RGB composite layer.
folium.TileLayer(
    tiles=mapid['tile_fetcher'].url_format,
    attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine</a>',
    overlay=True,
    name='median composite',
).add_to(folium_map)
# Predicted class-label layer.
folium.TileLayer(
    tiles=label_mapid['tile_fetcher'].url_format,
    attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine</a>',
    overlay=True,
    name='predicted label',
).add_to(folium_map)
# Class-probability layer.
folium.TileLayer(
    tiles=probability_mapid['tile_fetcher'].url_format,
    attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine</a>',
    overlay=True,
    name='probability',
).add_to(folium_map)
# Add a layer-toggle control; the bare expression displays the map.
folium_map.add_child(folium.LayerControl())
folium_map
```
| github_jupyter |
## Section Contents
* [plot(): analyze distributions](plot.ipynb)
* [plot_correlation(): analyze correlations](plot_correlation.ipynb)
* [plot_missing(): analyze missing values](plot_missing.ipynb)
* [plot_diff(): analyze difference between DataFrames](plot_diff.ipynb)
* [create_report(): create a profile report](create_report.ipynb)
* [Get intermediates: get the intermediate data](get_intermediates.ipynb)
* [How-to guide: customize your output](how_to_guide.ipynb)
* [Parameter configurations: parameter summary settings](parameter_configurations.ipynb)
* [Insight: automatic insight detection](insights.ipynb)
* [Case study: Titanic](titanic.ipynb)
* [Case study: House Prices](house_price.ipynb)
## Introduction to Exploratory Data Analysis and `dataprep.eda`
[Exploratory Data Analysis (EDA)](https://www.wikiwand.com/en/Exploratory_data_analysis) is the process of exploring a dataset and getting an understanding of its main characteristics. The `dataprep.eda` package simplifies this process by allowing the user to explore important characteristics with simple APIs. Each API allows the user to analyze the dataset from a high level to a low level, and from different perspectives. Specifically, `dataprep.eda` provides the following functionality:
* Analyze column **distributions** with `plot()`. The function `plot()` explores the column distributions and statistics of the dataset. It will detect the column type, and then output various plots and statistics that are appropriate for the respective type. The user can optionally pass one or two columns of interest as parameters: If one column is passed, its distribution will be plotted in various ways, and column statistics will be computed. If two columns are passed, plots depicting the relationship between the two columns will be generated.
* Analyze **correlations** with `plot_correlation()`. The function `plot_correlation()` explores the correlation between columns in various ways and using multiple correlation metrics. By default, it plots correlation matrices with various metrics. The user can optionally pass one or two columns of interest as parameters: If one column is passed, the correlation between this column and all other columns will be computed and ranked. If two columns are passed, a scatter plot and regression line will be plotted.
* Analyze **missing values** with `plot_missing()`. The function `plot_missing()` enables thorough analysis of the missing values and their impact on the dataset. By default, it will generate various plots which display the amount of missing values for each column and any underlying patterns of the missing values in the dataset. To understand the impact of the missing values in one column on the other columns, the user can pass the column name as a parameter. Then, `plot_missing()` will generate the distribution of each column with and without the missing values from the given column, enabling a thorough understanding of their impact.
* Analyze column **differences** with `plot_diff()`. The function `plot_diff()` explores the differences of column distributions and statistics across multiple datasets. It will detect the column type, and then output various plots and statistics that are appropriate for the respective type. The user can optionally set the baseline which is used as the target dataset to compare with other datasets.
The following sections give a simple demonstration of `plot()`, `plot_correlation()`, `plot_missing()`, and `plot_diff()` using an example dataset.
## Analyze distributions with `plot()`
The function `plot()` explores the distributions and statistics of the dataset. The following describes the functionality of `plot()` for a given dataframe `df`.
1. `plot(df)`: plots the distribution of each column and calculates dataset statistics
2. `plot(df, x)`: plots the distribution of column `x` in various ways and calculates column statistics
3. `plot(df, x, y)`: generates plots depicting the relationship between columns `x` and `y`
The following shows an example of `plot(df)`. It plots a histogram for each numerical column, a bar chart for each categorical column, and computes dataset statistics.
```
from dataprep.eda import plot
from dataprep.datasets import load_dataset
import numpy as np
# Load the bundled Titanic example dataset as a pandas DataFrame.
df = load_dataset('titanic')
# Plot a histogram per numerical column, a bar chart per categorical
# column, and compute overall dataset statistics.
plot(df)
```
For more information about the function `plot()` see [here](plot.ipynb).
## Analyze correlations with `plot_correlation()`
The function `plot_correlation()` explores the correlation between columns in various ways and using multiple correlation metrics. The following describes the functionality of `plot_correlation()` for a given dataframe `df`.
1. `plot_correlation(df)`: plots correlation matrices (correlations between all pairs of columns)
2. `plot_correlation(df, x)`: plots the most correlated columns to column `x`
3. `plot_correlation(df, x, y)`: plots the joint distribution of column `x` and column `y` and computes a regression line
The following shows an example of `plot_correlation()`. It generates correlation matrices using [Pearson](https://www.wikiwand.com/en/Pearson_correlation_coefficient), [Spearman](https://www.wikiwand.com/en/Spearman%27s_rank_correlation_coefficient), and [KendallTau](https://www.wikiwand.com/en/Kendall_rank_correlation_coefficient) correlation coefficients
```
from dataprep.eda import plot_correlation
from dataprep.datasets import load_dataset
df = load_dataset("wine-quality-red")
plot_correlation(df)
```
For more information about the function `plot_correlation()` see [here](plot_correlation.ipynb).
## Analyze missing values with `plot_missing()`
The function `plot_missing()` enables thorough analysis of the missing values and their impact on the dataset. The following describes the functionality of `plot_missing()` for a given dataframe `df`.
1. `plot_missing(df)`: plots the amount and position of missing values, and their relationship between columns
2. `plot_missing(df, x)`: plots the impact of the missing values in column `x` on all other columns
3. `plot_missing(df, x, y)`: plots the impact of the missing values from column `x` on column `y` in various ways.
```
from dataprep.eda import plot_missing
from dataprep.datasets import load_dataset
df = load_dataset("titanic")
plot_missing(df)
```
For more information about the function `plot_missing()` see [here](plot_missing.ipynb).
## Analyze difference with `plot_diff()`
The function `plot_diff()` explores the difference of column distributions and statistics across multiple datasets. The following describes the functionality of `plot_diff()` for two given dataframes `df1` and `df2`.
```
from dataprep.eda import plot_diff
from dataprep.datasets import load_dataset
# Use only the first 9 columns of each dataset to keep the example small.
df1 = load_dataset("house_prices_test").iloc[:, :9]
df2 = load_dataset("house_prices_train").iloc[:, :9]
# Compare column distributions and statistics across the two DataFrames.
plot_diff([df1, df2])
```
For more information about the function `plot_diff()` see [here](plot_diff.ipynb).
## Create a profile report with `create_report()`
The function `create_report()` generates a comprehensive profile report of the dataset. `create_report()` combines the individual components of the `dataprep.eda` package and outputs them into a nicely formatted HTML document. The document contains the following information:
1. Overview: detect the types of columns in a dataframe
2. Variables: variable type, unique values, distinct count, missing values
3. Quantile statistics like minimum value, Q1, median, Q3, maximum, range, interquartile range
4. Descriptive statistics like mean, mode, standard deviation, sum, median absolute deviation, coefficient of variation, kurtosis, skewness
5. Text analysis for length, sample and letter
6. Correlations: highlighting of highly correlated variables, Spearman, Pearson and Kendall matrices
7. Missing Values: bar chart, heatmap and spectrum of missing values
An example report can be downloaded [here](../../_static/images/create_report/titanic_dp.html).
## Customize the plot
### Customize plot via `config`
The plot/report can be customized via the `config` parameter. E.g., enable/disable some plots, set the bins of histogram, set the height and width of the plots.
The following example shows how to set the bins of histogram to 1000, and disable the `KDE Plot`. For more configurations, please read this doc: [Parameter configurations: parameter summary settings](parameter_configurations.ipynb)
```
from dataprep.eda import plot
from dataprep.datasets import load_dataset
df = load_dataset('titanic')
plot(df, config = {"hist.bins": 1000, "kde.enable": False})
```
### Identify some plots to show via `display`
In `config`, you can disable some plots by setting their `enable` option to False, as shown in the above example which disables the `KDE Plot`. However, sometimes you may just want to show a few plots and disable all others. In this case, using the `display` parameter is a more convenient approach: simply list the plot names that you want to display. E.g., the following code will show only the `Interactions` section in the report:
```
from dataprep.eda import create_report
from dataprep.datasets import load_dataset
df = load_dataset('titanic')
create_report(df, display = ["Interactions"])
```
The following code will show only box plot:
```
from dataprep.eda import plot
from dataprep.datasets import load_dataset
df = load_dataset('titanic')
plot(df, display = ["Bar Chart"])
```
## Get the intermediate data
DataPrep.EDA separates the computation and rendering, so that you can just compute the intermediate data and render it using other plotting libraries.
For each `plot` function, there is a corresponding `compute` function, which returns the computed intermediates used for rendering. For example, for `plot_correlation(df)` function, you can get the intermediates using `compute_correlation(df)`. It's a dictionary, and you can also save it to a json file.
```
from dataprep.eda import compute_correlation
from dataprep.datasets import load_dataset
df = load_dataset("titanic")
# Compute the correlation intermediates without rendering any plots.
imdt = compute_correlation(df)
# The intermediates behave like a dictionary and can be saved as JSON.
imdt.save("imdt.json")
imdt
```
## Specifying colors
The supported colors of DataPrep.EDA match those of the [Bokeh](https://bokeh.org/) library. Color values can be provided in any of the following ways:
* any of the [147 named CSS colors](http://www.w3schools.com/colors/colors_names.asp), e.g 'green', 'indigo'
* an RGB(A) hex value, e.g., '#FF0000', '#44444444'
* a 3-tuple of integers (r,g,b) between 0 and 255
* a 4-tuple of (r,g,b,a) where r, g, b are integers between 0 and 255 and a is a floating point value between 0 and 1
| github_jupyter |
# Prominent paths originating from epilepsy to a Compound
```
import math
import pandas
from neo4j import GraphDatabase
from tqdm.notebook import tqdm
import hetnetpy.readwrite
import hetnetpy.neo4j
from src.database_utils import get_db_connection
# Disease Ontology identifier for epilepsy.
epilepsy_id = 'DOID:1826'
# Query every Compound-epilepsy metapath, ranked by p-value.
# NOTE(review): the f-string interpolation is safe only because
# epilepsy_id is a hard-coded constant; use query parameters if this
# value ever comes from user input.
query = f'''\
SELECT
outer_pc.dwpc as dwpc,
outer_pc.p_value as p_value,
outer_pc.metapath_id as metapath_id,
top_ids.source_name as source_name,
top_ids.target_name as target_name
FROM (
SELECT dwpc, p_value, metapath_id, source_id, target_id, n1.name AS source_name, n2.name AS target_name
FROM dj_hetmech_app_pathcount pc
JOIN dj_hetmech_app_node join_node
ON pc.target_id=join_node.id OR pc.source_id=join_node.id
JOIN dj_hetmech_app_node n1
ON pc.source_id = n1.id
JOIN dj_hetmech_app_node n2
ON pc.target_id = n2.id
WHERE join_node.identifier='{epilepsy_id}' AND (n1.metanode_id = 'Compound' OR n2.metanode_id = 'Compound')
ORDER BY pc.p_value
) AS top_ids
JOIN dj_hetmech_app_pathcount outer_pc
ON (top_ids.source_id = outer_pc.source_id AND
top_ids.target_id = outer_pc.target_id) OR
(top_ids.source_id = outer_pc.target_id AND
top_ids.target_id = outer_pc.source_id)
ORDER BY outer_pc.p_value;
'''
with get_db_connection() as connection:
top_metapaths = pandas.read_sql(query, connection)
top_metapaths = top_metapaths.sort_values(by=['source_name', 'metapath_id'])
# Ensure that you only have one copy of each (source_name, metapath_id) pair
top_metapaths = top_metapaths.drop_duplicates(subset=['source_name', 'metapath_id'])
top_metapaths = top_metapaths.sort_values(by='p_value')
# Remove any rows with NaN values
top_metapaths = top_metapaths.dropna()
# Replace exact-zero p-values with the smallest nonzero p-value so that
# -log10(p) below is finite.
min_p_value = top_metapaths[top_metapaths.p_value != 0].p_value.min()
top_metapaths.loc[top_metapaths.p_value == 0, 'p_value'] = min_p_value
print(top_metapaths.p_value.min())
# Metapath importance score: negative log10 of the p-value.
top_metapaths['neg_log_p_value'] = top_metapaths.p_value.apply(lambda x: -math.log10(x))
top_metapaths.head()
# Load the Hetionet v1.0 metagraph (pinned to a specific commit) to
# interpret metapath abbreviations.
url = 'https://github.com/hetio/hetionet/raw/76550e6c93fbe92124edc71725e8c7dd4ca8b1f5/hetnet/json/hetionet-v1.0-metagraph.json'
metagraph = hetnetpy.readwrite.read_metagraph(url)
def get_paths_for_metapath(metagraph, row):
    '''
    Return a DataFrame describing all paths for a given source, target,
    and metapath, annotated with importance scores.

    Parameters
    ----------
    metagraph : a hetnetpy.hetnet.MetaGraph instance to interpret metapath abbreviations
    row : a row from a pandas dataframe with 'metapath_id', 'source_name',
        'target_name', and 'neg_log_p_value' fields
    '''
    damping_exponent = .5
    metapath_data = metagraph.metapath_from_abbrev(row['metapath_id'])
    query = hetnetpy.neo4j.construct_pdp_query(metapath_data, path_style='string', property='name')
    params = {
        'source': row['source_name'],
        'target': row['target_name'],
        'w': damping_exponent
    }
    # Use the driver as a context manager so its connection pool is
    # closed after the query; the original created a new driver on every
    # call without ever closing it, leaking connections.
    with GraphDatabase.driver("bolt://neo4j.het.io") as driver:
        with driver.session() as session:
            metapath_result = session.run(query, params).data()
    # Annotate each path with metapath-level metadata and an overall
    # importance combining metapath significance and the path's share of
    # the degree-weighted path count (DWPC).
    for path in metapath_result:
        path['metapath'] = row['metapath_id']
        path['metapath_importance'] = row['neg_log_p_value']
        path['path_importance'] = path['metapath_importance'] * path['percent_of_DWPC']
        path['source'] = row['source_name']
    return pandas.DataFrame(metapath_result)
%%time
# Fetch the paths for every (source, target, metapath) row, with a
# progress bar, then concatenate into one DataFrame.
result_list = []
for index, row in tqdm(top_metapaths.iterrows(), total=len(top_metapaths.index)):
metapath_df = get_paths_for_metapath(metagraph, row)
result_list.append(metapath_df)
result_df = pandas.concat(result_list, ignore_index=True)
# Sort by compound, then by descending path importance.
result_df = result_df.sort_values(by=['source', 'path_importance', 'metapath'], ascending=[True, False, True])
result_df.head()
# Save as a compressed TSV; 5 significant digits keeps the file small.
result_df.to_csv('data/epilepsy_paths.tsv.xz', index=False, sep='\t', float_format="%.5g")
```
| github_jupyter |
```
from hyperneat.spatial_node import SpatialNode, SpatialNodeType
from hyperneat.substrate import Substrate
from hyperneat.evolution import Hyperneat
from neat.genes import ConnectionGene, NodeGene, NodeType
from neat.genome import Genome
from neat.activation_functions import ActivationFunction
from neat.neural_network import NeuralNetwork
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
# Genome
genome = Genome(num_layers=15, weights_range=[-3.0, 3.0])
genome.create_genome_by_size(8, 3)
net = genome.build_phenotype()
# Substrate setting
# Init substrate set
substrate_set = []
for i in range(2):
s = Substrate()
s.activation_function = ActivationFunction().get('TANH')
# Must create new objects or deep copies
s.input_nodes = [SpatialNode(0, SpatialNodeType.INPUT, [0.0, -0.5], ActivationFunction().get('TANH'), 0)]
s.output_nodes = [SpatialNode(1, SpatialNodeType.OUTPUT, [-0.5, 0.5], ActivationFunction().get('TANH'), 2),
SpatialNode(2, SpatialNodeType.OUTPUT, [0.5, 0.5], ActivationFunction().get('TANH'), 2)]
s.hidden_nodes = [SpatialNode(3, SpatialNodeType.HIDDEN, [-0.5, 0.0], ActivationFunction().get('TANH'), 1),
SpatialNode(4, SpatialNodeType.HIDDEN, [0.5, 0.0], ActivationFunction().get('TANH'), 1)]
s.input_count = 1
s.output_count = 2
s.hidden_count = 2
s.extend_nodes_list()
substrate_set.append(s)
substrate_set[0].coordinates = (-0.5, 0.5)
substrate_set[1].coordinates = (0.5, 0.5)
intra_substrate_conn = [[0, 1], [0, 2], [0, 3], [0, 4], [3, 1], [3, 2], [3, 4], [4, 1], [4, 2], [4, 3]]
inter_substrate_conn = [[0, 4, 1, 3], [1, 3, 0, 4]]
ea = Hyperneat()
ea.connection_threshold = 0.05
ea.max_connection_weight = 0.5
ea.max_bias = 0.06
ea.max_delay = 0.2
net = ea.build_modular_substrate(genome, substrate_set, intra_substrate_conn, inter_substrate_conn)
net.reset_values()
# Simulate the network: drive it with sine/cosine inputs over 20 time
# units at a 0.05 step and record the outputs of each output neuron.
# NOTE: np.linspace requires an integer sample count; the original
# passed the float 20 / 0.05, which raises a TypeError on modern NumPy,
# so it is cast to int explicitly.
time = np.linspace(0, 20, int(20 / 0.05))
signal_1 = np.sin(time)
signal_2 = np.cos(time)
# One row per output neuron (4 expected across both substrates).
output_signal = np.zeros([4, time.shape[0]])
out_id = net.out_neurons
for t, _ in enumerate(time):
    net.input([signal_1[t], signal_2[t]])
    net.activate_net(0.05)
    for o, oid in enumerate(out_id):
        output_signal[o, t] = net.neurons[oid].output
# Plot the four recorded output traces in a 2x2 grid.
fig, ax = plt.subplots(2, 2)
ax[0, 0].plot(output_signal[0])
ax[0, 1].plot(output_signal[1])
ax[1, 0].plot(output_signal[2])
ax[1, 1].plot(output_signal[3])
plt.tight_layout()
# Scratch/exploration cells below.
# Sample count for a 6-unit window at a 0.05 step (just inspecting it).
np.linspace(0, 1, int(6 / 0.05), endpoint=False).shape[0]
# Rebind inter_substrate_conn to the intra-substrate pair list.
inter_substrate_conn = [[0, 1], [0, 2], [0, 3], [0, 4], [3, 1], [3, 2], [3, 4], [4, 1], [4, 2], [4, 3]]
# NOTE(review): the comma makes this a 2-tuple of
# (inter_substrate_conn * 4, [[1, 0], ...]) -- likely not the intended
# concatenation; confirm before reusing this cell.
inter_substrate_conn = inter_substrate_conn * 4, [[1, 0], [2, 0], [3, 0], [4, 0]]
inter_substrate_conn[0]
# Build a list of 4 tuples followed by one extra connection list.
arr = []
for _ in range(4):
arr.append(inter_substrate_conn)
arr.append([[1, 0], [2, 0], [3, 0], [4, 0]])
arr
# Print each element of the first (tuple) entry.
for item in arr[0]:
print(item)
```
| github_jupyter |
# 🧐 Find label errors with cleanlab
In this tutorial, we will show you how you can find possible labeling errors in your data set with the help of [*cleanlab*](https://github.com/cgnorthcutt/cleanlab) and *Rubrix*.
## Introduction
As shown recently by [Curtis G. Northcutt et al.](https://arxiv.org/abs/2103.14749) label errors are pervasive even in the most-cited test sets used to benchmark the progress of the field of machine learning.
In the worst-case scenario, these label errors can destabilize benchmarks and tend to favor more complex models with a higher capacity over lower capacity models.
They introduce a new principled framework to “identify label errors, characterize label noise, and learn with noisy labels” called **confident learning**. It is open-sourced as the [cleanlab Python package](https://github.com/cgnorthcutt/cleanlab) that supports finding, quantifying, and learning with label errors in data sets.
This tutorial walks you through 5 basic steps to find and correct label errors in your data set:
1. 💾 Load the data set you want to check, and a model trained on it;
2. 💻 Make predictions for the test split of your data set;
3. 🧐 Get label error candidates with *cleanlab*;
4. 🔦 Uncover label errors with *Rubrix*;
5. 🖍 Correct label errors and load the corrected data set;
## Setup Rubrix
If you are new to Rubrix, visit and star Rubrix for updates: ⭐ [Github repository](https://github.com/recognai/rubrix)
If you have not installed and launched Rubrix, check the [Setup and Installation guide](../getting_started/setup&installation.rst).
Once installed, you only need to import Rubrix:
```
import rubrix as rb
```
### Install tutorial dependencies
Apart from [cleanlab](https://github.com/cgnorthcutt/cleanlab), we will also install the Hugging Face libraries [transformers](https://github.com/huggingface/transformers) and [datasets](https://github.com/huggingface/datasets), as well as [PyTorch](https://pytorch.org/), that provide us with the model and the data set we are going to investigate.
```
!pip install cleanlab torch transformers datasets
exit(0)
```
### Imports
Let us import all the necessary stuff in the beginning.
```
import rubrix as rb
from cleanlab.pruning import get_noise_indices
import torch
import datasets
from transformers import AutoTokenizer, AutoModelForSequenceClassification
```
## 1. Load model and data set
For this tutorial we will use the well studied [Microsoft Research Paraphrase Corpus](https://microsoft.com/en-us/download/details.aspx?id=52398) (MRPC) data set that forms part of the [GLUE benchmark](https://gluebenchmark.com/), and a pre-trained model from the Hugging Face Hub that was fine-tuned on this specific data set.
Let us first get the model and its corresponding tokenizer to be able to make predictions. For a detailed guide on how to use the 🤗 *transformers* library, please refer to their excellent [documentation](https://huggingface.co/transformers/task_summary.html#sequence-classification).
```
# Pre-trained RoBERTa fine-tuned on MRPC, from the Hugging Face Hub.
model_name = "textattack/roberta-base-MRPC"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
We then get the test split of the MRPC data set, that we will scan for label errors.
```
dataset = datasets.load_dataset("glue", "mrpc", split="test")
```
Let us have a quick look at the format of the data set. Label `1` means that both `sentence1` and `sentence2` are *semantically equivalent*, a `0` as label implies that the sentence pair is *not equivalent*.
```
dataset.to_pandas().head()
```
## 2. Make predictions
Now let us use the model to get predictions for our data set, and add those to our dataset instance. We will use the `.map` functionality of the *datasets* library to process our data batch-wise.
```
def get_model_predictions(batch):
    """Compute class probabilities for a batch of sentence pairs.

    `batch` is a dictionary of lists, as supplied by
    `datasets.Dataset.map` with `batched=True`. Returns a dict with a
    new "probabilities" column.
    """
    encoded = tokenizer(
        batch["sentence1"], batch["sentence2"], padding=True, return_tensors="pt"
    )
    # Forward pass, then turn the logits into probabilities.
    logits = model(**encoded).logits
    probs = torch.softmax(logits, dim=1).detach().numpy()
    return {"probabilities": probs}
# Apply predictions batch-wise, adding a "probabilities" column to the
# dataset (16 examples per forward pass).
dataset = dataset.map(
get_model_predictions,
batched=True,
batch_size=16,
)
```
## 3. Get label error candidates
To identify label error candidates the cleanlab framework simply needs the probability matrix of our predictions (`n x m`, where `n` is the number of examples and `m` the number of labels), and the potentially noisy labels.
```
# Output the data as numpy arrays (cleanlab expects arrays).
dataset.set_format("numpy")
# Get a boolean array of label error candidates: `s` is the vector of
# (potentially noisy) given labels, `psx` the n x m predicted
# probability matrix.
label_error_candidates = get_noise_indices(
s=dataset["label"],
psx=dataset["probabilities"],
)
```
This one line of code provides us with a boolean array of label error candidates that we can investigate further.
Out of the **1725 sentence pairs** present in the test data set we obtain **129 candidates** (7.5%) for possible label errors.
```
# Fraction of examples flagged as potential label errors.
frac = label_error_candidates.sum()/len(dataset)
print(
f"Total: {len(dataset)}\n"
f"Candidates: {label_error_candidates.sum()} ({100*frac:0.1f}%)"
)
```
## 4. Uncover label errors in Rubrix
Now that we have a list of potential candidates, let us log them to *Rubrix* to uncover and correct the label errors.
First we switch to a pandas DataFrame to filter out our candidates.
```
candidates = dataset.to_pandas()[label_error_candidates]
```
Then we will turn those candidates into [TextClassificationRecords](../reference/python_client_api.rst#rubrix.client.models.TextClassificationRecord) that we will log to *Rubrix*.
```
def make_record(row):
    """Build a Rubrix TextClassificationRecord from one candidate row."""
    labels = ["Not equivalent", "Equivalent"]
    prediction = list(zip(labels, row.probabilities))
    # The (noisy) gold label: 1 -> "Equivalent", anything else -> "Not equivalent".
    annotation = labels[1] if row.label == 1 else labels[0]
    return rb.TextClassificationRecord(
        inputs={"sentence1": row.sentence1, "sentence2": row.sentence2},
        prediction=prediction,
        prediction_agent="textattack/roberta-base-MRPC",
        annotation=annotation,
        annotation_agent="MRPC"
    )

records = candidates.apply(make_record, axis=1)
```
Having our records at hand we can now log them to *Rubrix* and save them in a dataset that we call `"mrpc_label_error"`.
```
# Log the candidate records into a Rubrix dataset named "mrpc_label_error".
rb.log(records, name="mrpc_label_error")
```
Scanning through the records in the [*Explore Mode*](../reference/rubrix_webapp_reference.rst#explore-mode) of *Rubrix*, we were able to find at least **30 clear cases** of label errors.
A couple of examples are shown below, in which the noisy labels are shown in the upper right corner of each example.
The predictions of the model together with their probabilities are shown below each sentence pair.

If your model is not terribly over-fitted, you can also try to run the candidate search over your training data to find very obvious label errors.
If we repeat the steps above on the training split of the MRPC data set (3668 examples), we obtain **9 candidates** (this low number is expected) out of which **5 examples** were clear cases of label errors.
A couple of examples are shown below.

## 5. Correct label errors
With *Rubrix* it is very easy to correct those label errors.
Just switch on the [*Annotation Mode*](../reference/rubrix_webapp_reference.rst#annotation-mode), correct the noisy labels and load the dataset back into your notebook.
```
# Load the dataset into a pandas DataFrame
# (after the noisy labels were corrected in the Rubrix Annotation Mode).
dataset_with_corrected_labels = rb.load("mrpc_label_error")
dataset_with_corrected_labels.head()
```
Now you can use the corrected data set to repeat your benchmarks and measure the "real-world performance" of your model that you actually care about in practice.
## Summary
In this tutorial we saw how to leverage *cleanlab* and *Rubrix* to uncover label errors in your data set.
In just a few steps you can quickly check if your test data set is seriously affected by label errors and if your benchmarks are really meaningful in practice.
Maybe one of your less complex models turns out to beat your resource-hungry super model, and the deployment process just got a little bit easier 😀.
*Cleanlab* and *Rubrix* do not care about the model architecture or the framework you are working with.
They just care about the underlying data and allow you to put more humans in the loop of your AI Lifecycle.
## Next steps
### 📚 [Rubrix documentation](https://docs.rubrix.ml) for more guides and tutorials.
### 🙋♀️ Join the Rubrix community! A good place to start is the [discussion forum](https://github.com/recognai/rubrix/discussions).
### ⭐ Rubrix [Github repo](https://github.com/recognai/rubrix) to stay updated.
| github_jupyter |
### Task Video :
#### Dataset Link:
Dataset can be found at " /data/videos/ " in the respective challenge's repo.
#### Description:
A video is just a sequence of images arranged in a specific order. The images of that sequence are called frames. Therefore, in video intelligence tasks, we take advantage of the temporal nature of video and the semantic content of consecutive frames.
#### Objective:
How to read video data and convert it into useable format for machine learning
#### Tasks:
- Load dataset from provided link. Videos are in “.mp4” format.
- Extract frames from video at fps=10 (opencv’s VideoCapture Class)
- Plot 4th frame of 'VID_2.mp4' (matplotlib or Pillow library)
- Print dimensions of any single frame of 'VID_6.mp4'
- Print all pixel values of 10th frame of 'VID_14.mp4'
- Perform sanity check for each video whether all frames have same dimensions or not
#### Further fun (will not be evaluated):
_Prerequisites: CNN and image processing_
- We will perform video classification for fun on this sample dataset. You can download labels here: _(Link to be added soon or self-annotation for small dataset is also possible)_
- Train image classifier on all frames extracted at fps=10 from all videos.
- The naive approach to do video classification would be to classify each frame and save results in sequential format, and that is it !! Obviously there are much better ways of doing video classification taking advantage of the temporal nature of data.
#### Helpful Links:
- Detailed description of how to process video frames: https://www.youtube.com/watch?v=tQetgoLy70s
- Nice tutorial on video classification: https://www.analyticsvidhya.com/blog/2018/09/deep-learning-video-classification-python/
- Used .avi format but the idea is same: https://www.analyticsvidhya.com/blog/2019/09/step-by-step-deep-learning-tutorial-video-classification-python/
- Line-by-Line explanation of video classification code: https://www.pyimagesearch.com/2019/07/15/video-classification-with-keras-and-deep-learning/
```
import cv2                       # For handling videos
import matplotlib.pyplot as plt  # For plotting images; the Pillow library works too
import numpy as np               # For mathematical operations

# Capture the video from a file
videoFile = 'data/videos/VID_2.mp4'
cap = cv2.VideoCapture(videoFile)

# Get frame rate of video (property 5 == CAP_PROP_FPS)
frameRate = cap.get(5)
print("Frame rate of video:", frameRate)

# Get time length of video (property 7 == CAP_PROP_FRAME_COUNT)
total_frames = cap.get(7)
print("Total frames:", total_frames)
print("Length of video: %.2f seconds" % (total_frames/frameRate))
# https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get

# Get frame width and height (properties 3 and 4)
width = cap.get(3)
height = cap.get(4)
print("(width, height) = ", (width, height))

# Desired effective sampling rate: keep every k-th frame so we get ~10 fps
desired_fps = 10
frame_skipping_rate = int(np.ceil(frameRate / desired_fps))
print("Frame skipping rate:", frame_skipping_rate, "frames")

# Store frames
frames = []
# Extract frames until we reach the end of the video
while cap.isOpened():
    # Index of the frame about to be read (property 1 == CAP_PROP_POS_FRAMES)
    frameId = cap.get(1)
    # Read the next incoming frame
    ret, frame = cap.read()
    # At the end of the video cap.read() returns ret == False
    if not ret:
        break
    if frameId % frame_skipping_rate == 0:
        frames.append(frame)
cap.release()
print("Done!")

# Stack frames into a single NHWC array (frames, height, width, channels)
single_video = np.array(frames)
print("NHWC format:", single_video.shape)

print("Plotted 4th frame of 2nd video")
plt.imshow(single_video[3, :, :, :])
plt.show()

# The original message said "6th video", but this block only loads VID_2.mp4;
# the text below was corrected to match the data actually shown.
print("Dimensions of 5th frame of 2nd video")
single_video[4, :, :, :].shape
single_video[13, :, :, :]
```
### Here's the solution now
```
import glob

# Collect all .mp4 files in the videos folder.
filenames = glob.glob('data/videos/*.mp4')
print(filenames)

# Map: file path -> array of frames sampled at ~10 fps (NHWC once stacked).
videos = {}
for file in filenames:
    cap = cv2.VideoCapture(file)
    # Property 5 == CAP_PROP_FPS (native frame rate of the video).
    frameRate = cap.get(5)
    desired_fps = 10
    # Keep every k-th frame so the effective sampling rate is ~desired_fps.
    frame_skipping_rate = int(np.ceil(frameRate / desired_fps))
    # Store frames
    frames = []
    # Extract frames until we reach the end of the video
    while(cap.isOpened()):
        # Index of the frame about to be read (property 1 == CAP_PROP_POS_FRAMES).
        frameId = cap.get(1)
        # Reads the next incoming frame
        ret, frame = cap.read()
        # At the end of the video cap.read() returns ret == False
        if (ret != True):
            break
        if (frameId % frame_skipping_rate == 0):
            frames.append(frame)
    cap.release()
    frames = np.array(frames)
    videos[file] = frames
print("Number of videos:", len(videos))

# NOTE(review): the backslash in the keys below assumes glob ran on Windows
# ('data/videos\\VID_2.mp4'); on POSIX systems the keys use forward slashes.
# Plot the 4th frame (index 3) of VID_2.
plt.imshow(videos["data/videos\\VID_2.mp4"][3,:,:,:])
plt.show()
# Dimensions of a single frame of VID_6: (height, width, channels).
videos["data/videos\\VID_6.mp4"][4,:,:,:].shape
# Pixel values of one frame of VID_14.
# NOTE(review): index 13 is the 14th extracted frame; the task asks for the
# 10th frame, which would be index 9 - confirm which is intended.
videos["data/videos\\VID_14.mp4"][13,:,:,:]

# Sanity check across videos: do the first frames all share one dimension?
# NOTE(review): frames *within* one video already share a shape once stacked
# into a single ndarray; this loop only compares first frames across videos.
sanity_check = True
dim_set = set()
for video in videos.values():
    dim_set.add(video[0].shape) # Get dimensions of first frame and add it in set
    if len(dim_set)>1:
        sanity_check = False
print("Sanity check:", sanity_check)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/AzucenaMV/top2000-dashboard/blob/main/top_2000_spotify_api.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
import requests
import os
from google.colab import drive

# Mount Google Drive so the CSVs can be read/written from the project folder.
drive.mount('/content/drive')

# Project folder and the file names used throughout the notebook.
path = 'drive/MyDrive/JADS/DataVizProject/Code/'
file_top = "top2000.csv"
file_features = 'song_features_v2.csv'
file_id = 'song_id.csv'
file_artist = 'artist_features.csv'

# Top-2000 chart data: one row per song with artist and title.
df = pd.read_csv(os.path.join(path, file_top))
def clean_name(df, new_col='song_clean', old_col='song'):
    """Add a normalized copy of a text column for fuzzy matching.

    Drops rows where `old_col` is missing (in place), then writes a
    lower-cased, left-stripped, apostrophe-free, ASCII-folded version of
    `old_col` into `new_col`.

    Args:
        df: pandas DataFrame to clean (mutated in place and returned).
        new_col: name of the column to create.
        old_col: name of the source text column.

    Returns:
        The same DataFrame with `new_col` added.
    """
    df.dropna(subset=[old_col], inplace=True)
    cleaned = df[old_col].str.lower().str.lstrip()
    # regex=False: replace the literal apostrophe (also silences the pandas
    # FutureWarning about str.replace's default regex behaviour).
    cleaned = cleaned.str.replace("'", "", regex=False)
    # Strip accents: decompose (NFKD), then drop the non-ASCII combining marks.
    df[new_col] = (cleaned.str.normalize('NFKD')
                          .str.encode('ascii', errors='ignore')
                          .str.decode('utf-8'))
    return df
# Normalize both the song and the artist names.
df = clean_name(df)
df = clean_name(df, new_col = 'artist_clean', old_col = 'artist')

# Manually cleaning song names that the automatic normalization got wrong.
df.loc[232,'song_clean'] = 'kronenburg park'
df.loc[2278,'song_clean'] = "rainy day woman 12"
df.loc[2817,'song_clean'] = 'everlong acoustic'
df.loc[3456,'song_clean'] = 'abergavernny'
df.loc[3482,'song_clean'] = "dont you write her off"
df.loc[3614,'song_clean'] = 'ein bisschen frieden'
df.loc[4205,'song_clean'] = 'everybody knows'
TOKEN = ""  # Spotify API bearer token (fill in before running)

# Look up each song's Spotify id by track title (and, when possible, artist).
data_list = []
for ind, (artist, song) in enumerate(zip(df['artist_clean'], df['song_clean'])):
    # First try a search restricted to both the track title and the artist.
    r = requests.get(
        f'https://api.spotify.com/v1/search?q=track:{song}%20artist:{artist}&type=track&limit=1',
        headers={'Authorization': f'Bearer {TOKEN}'},
    )
    # Narrow exception handling: a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit. These three cover a missing/empty
    # 'tracks'/'items' payload and a non-JSON response body.
    try:
        json = r.json()['tracks']['items'][0]
        data_list.append([ind, json['id'], json['name'], json['artists'][0]['name'],
                          json['artists'][0]['id'], json['album']['name'],
                          json['album']['release_date'], json['popularity'],
                          json['duration_ms'], 1])
    except (KeyError, IndexError, ValueError):
        # No match with the artist filter - retry with the track title only.
        r = requests.get(
            f'https://api.spotify.com/v1/search?q=track:{song}&type=track&limit=1',
            headers={'Authorization': f'Bearer {TOKEN}'},
        )
        try:
            json = r.json()['tracks']['items'][0]
            data_list.append([ind, json['id'], json['name'], json['artists'][0]['name'],
                              json['artists'][0]['id'], json['album']['name'],
                              json['album']['release_date'], json['popularity'],
                              json['duration_ms'], 0])
        except (KeyError, IndexError, ValueError):
            # Song not found at all: report the index, keep a placeholder row.
            print(ind)
            data_list.append([ind] + [''] * 9)

df_id = pd.DataFrame(data_list, columns=['index', 'song_id', 'song_name', 'artist_name',
                                         'artist_id', 'album_name', 'album_date',
                                         'song_popularity', 'duration_ms',
                                         'search_includes_artist'])
# saving file
df_id.to_csv(os.path.join(path, file_id), index=False)
df_id = pd.read_csv(os.path.join(path,file_id))

TOKEN = ""  # Spotify API bearer token (fill in before running)

# getting audio features with song id
import time
features_list = []
# Failed lookups were saved as '' and come back as NaN after the CSV
# round-trip, so notna() filters most of them out.
song_ids = df_id[df_id.song_id.notna()].song_id
for id in song_ids:  # NOTE(review): `id` shadows the builtin; kept as-is
    if id != '':
        time.sleep(.5)  # crude rate limiting against the Spotify API
        x = requests.get(f'https://api.spotify.com/v1/audio-features/{id}',
        headers={'Authorization': f'Bearer {TOKEN}'})
        features_list.append(x.json())

df_features = pd.DataFrame(features_list)
df_features.shape
# saving file; drop responses without an 'id' (failed lookups)
df_features.dropna(subset = ['id'], inplace = True)
df_features.to_csv(os.path.join(path,file_features), index = False)
TOKEN = ""  # Spotify API bearer token (fill in before running)

# Fetch artist-level metadata (genres, popularity) for every unique artist id.
import time
artist_list = []
artist_ids = df_id.artist_id.unique()
for id in artist_ids:  # NOTE(review): `id` shadows the builtin; kept as-is
    if id != '':
        time.sleep(.5)  # crude rate limiting against the Spotify API
        x = requests.get(f'https://api.spotify.com/v1/artists/{id}',
        headers={'Authorization': f'Bearer {TOKEN}'})
        response = x.json()
        artist_list.append([id,response['genres'],response['popularity'],response['name']])

df_artist = pd.DataFrame(artist_list, columns = ['artist_id', 'artist_genre','artist_popularity','artist_name'])
# saving artist features file
df_artist.to_csv(os.path.join(path,file_artist), index = False)
# Re-load everything from disk so this section can run independently.
df_id = pd.read_csv(os.path.join(path,file_id))
df_features = pd.read_csv(os.path.join(path,file_features))
df_artist = pd.read_csv(os.path.join(path,file_artist))

# Removing possible duplicates
df_id = df_id.drop_duplicates(subset = ['song_id'], keep = 'first')
df_features = df_features.drop_duplicates(subset = ['id'], keep = 'first')

# Merging all dfs: song ids + audio features (on the Spotify track id), then
# artist metadata (on the artist id). Left joins keep every song row.
df_merged = df_id.merge(df_features, how = 'left', left_on = 'song_id', right_on = 'id', suffixes = ("","_feature"))
df_final = df_merged.merge(df_artist, how = 'left', on = 'artist_id', suffixes = ("","_artist"))
df_final.shape

# Saving final df
file_spotify = "spotify_features.csv"
df_final.to_csv(os.path.join(path,file_spotify), index = False)
```
| github_jupyter |
# Uncertainty Quantification (UQ)
Approach:
1. Select some parameters to vary (e.g., the mean speed of pedestrians).
2. Use different distributions to estimate selected parameters.
3. Test the effect on a so-called quantity of interest (e.g., the density).
That is, you feed different input distributions, simulate and check output. Create a figure of this idea by producing multiple input distributions represented as histograms. Then, use Inkscape or LibreOffice Draw to combine the three steps here:
```
+------------+ +------------+ +------------+
| Different | | | | Distribut- |
| input | | | | ion of |
| distribut- | ---> | Vadere | ---> | quantity |
| ions / hi- | | | | of |
| stograms | | | | interest |
+------------+ +------------+ +------------+
```
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
def use_custom_plot_settings(font_weight="normal"):
    """Apply a consistent set of font sizes and weights to matplotlib."""
    sizes = {"extra_small": 12, "small": 16, "medium": 18, "big": 20}
    plt.style.use("default")
    plt.rc("font", size=sizes["small"], weight=font_weight)
    plt.rc("axes", titlesize=sizes["big"], titleweight=font_weight)
    plt.rc("axes", labelsize=sizes["medium"], labelweight=font_weight)
    plt.rc("xtick", labelsize=sizes["small"])
    plt.rc("ytick", labelsize=sizes["small"])
    plt.rc("legend", fontsize=sizes["extra_small"])
    plt.rc("figure", titlesize=sizes["big"], titleweight=font_weight)
def use_default_plot_settings():
    """Restore matplotlib's built-in default rc settings."""
    plt.rcdefaults()
use_custom_plot_settings(font_weight="normal")
print(plt.style.available)
sns.set_style(style="white")


def _plot_distributions(distributions, color, file_prefix, sample_size=200):
    """Draw one histogram+KDE figure per (sampler, kwargs) pair and save it as a PDF.

    Args:
        distributions: list of (numpy sampler, kwargs dict) pairs.
        color: matplotlib color for histogram and KDE.
        file_prefix: "Input" or "Output"; used in the saved file name.
        sample_size: number of random samples drawn per distribution.
    """
    for i, (sampler, params) in enumerate(distributions):
        plt.figure(i)
        x = sampler(**params, size=sample_size)
        kde_kws = {"lw": 4}
        ax = sns.distplot(x, bins=5, color=color, kde_kws=kde_kws, label=sampler.__name__)
        # Remove labels and tick markers: only the shape matters in the sketch.
        plt.xticks([])
        plt.yticks([])
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
        fig = ax.get_figure()
        filename = "{}-Distribution-{}.pdf".format(file_prefix, sampler.__name__.capitalize())
        fig.savefig(filename, bbox_inches="tight")
        plt.show()


# Input distributions (black): what we feed into the simulation.
input_distributions = [
    (np.random.normal, {}),
    (np.random.uniform, {"low": -3, "high": 3}),
    (np.random.triangular, {"left": 0, "mode": 0, "right": 3}),
]
_plot_distributions(input_distributions, color="black", file_prefix="Input")

# Output distributions (red): the distribution of the quantity of interest.
output_distribution = [
    (np.random.poisson, {"lam": 1}),
    (np.random.normal, {}),
    (np.random.exponential, {}),
]
_plot_distributions(output_distribution, color="red", file_prefix="Output")
```
| github_jupyter |
# Handwritten Digit Recognition With Deep Learning
#### A classic image recognition problem. Exploratory project - [repo here.](https://github.com/jeremyrcouch/digitrecognition)
---
The [MNIST](http://yann.lecun.com/exdb/mnist/) database is a collection of 70,000 handwritten digits (0 to 9). The goal is to build a model capable of recognizing a digit given only the image information. Deep learning is well suited to this task, so we're going to build a neural network to classify the digits.
```
import math
from typing import List
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from keras import layers, backend
from keras.datasets import mnist
from keras.models import Model
from keras.utils import np_utils
from keras.callbacks import Callback, LearningRateScheduler, EarlyStopping
RANDOM_SEED = 17       # seed for repeatable runs
VAL_SET_RATIO = 0.125  # fraction of the training set held out for validation
FONTSIZE = 20          # base font size for plots
LINEWIDTH = 5          # plot line width
MARKERSIZE = 12        # plot marker size

np.random.seed(RANDOM_SEED) # for repeatability
# Images are handled as (channels, height, width) throughout.
backend.set_image_data_format('channels_first')
def reshape_input_data(data: np.ndarray) -> np.ndarray:
    """Reshape and scale input (image) data for use.

    Args:
        data: numpy array, input data of shape (samples, height, width)

    Returns:
        _: numpy array of shape (samples, 1, height, width), scaled to [0, 1]
    """
    n_samples, height, width = data.shape
    # Insert a single channel axis (channels_first) and scale pixels to [0, 1].
    return data.reshape(n_samples, 1, height, width) / 255
def split_val_set_from_train(X_train_in: np.ndarray, y_train_in: np.ndarray,
                             val_ratio: float = 0.125):
    """Split a validation set out of the training set.

    The validation set is taken from the *end* of the training arrays.

    Args:
        X_train_in: numpy array, training set images
        y_train_in: numpy array, training set labels
        val_ratio: float, ratio of training set to use as validation set

    Returns:
        (X_train, y_train), (X_val, y_val): numpy arrays of images and labels
    """
    n_total = X_train_in.shape[0]
    n_val = int(val_ratio * n_total)
    split_at = n_total - n_val
    X_train, X_val = X_train_in[:split_at, ...], X_train_in[-n_val:, ...]
    y_train, y_val = y_train_in[:split_at, ...], y_train_in[-n_val:, ...]
    return (X_train, y_train), (X_val, y_val)
def visualize_fit(metrics):
    """Visualize fitting process.

    Plots training/validation accuracy (top panel) and the learning rate
    (bottom panel) per epoch, from the lists recorded by a `recordMetrics`
    callback (attributes: acc, val_acc, lr).
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(9, 9))
    # Accuracy curves: green = training, blue = validation.
    ax1.plot(metrics.acc, 'g', linewidth=LINEWIDTH,
             markersize=MARKERSIZE, label='train')
    ax1.plot(metrics.val_acc, 'b', linewidth=LINEWIDTH,
             markersize=MARKERSIZE, label='val')
    ax1.legend(fontsize=FONTSIZE-4)
    ax1.grid()
    ax1.set_ylabel('Accuracy', fontsize=FONTSIZE)
    ax1.tick_params(labelsize=FONTSIZE-4)
    # Learning-rate schedule in red, sharing the epoch axis.
    ax2.plot(metrics.lr, 'r', linewidth=LINEWIDTH,
             markersize=MARKERSIZE)
    ax2.grid()
    ax2.set_xlabel('Epoch', fontsize=FONTSIZE)
    ax2.set_ylabel('Learning Rate', fontsize=FONTSIZE)
    ax2.tick_params(labelsize=FONTSIZE-4)
    fig.tight_layout()
def lr_function(epoch: int) -> float:
    """Step-decay learning rate schedule.

    Starts at 1e-3 and halves every 5 epochs.

    Args:
        epoch: int, epoch number (0-based)

    Returns:
        _: float, learning rate for this epoch
    """
    initial_rate = 0.001
    decay_factor = 0.5
    step_length = 5.0
    steps_taken = math.floor((1 + epoch) / step_length)
    return initial_rate * math.pow(decay_factor, steps_taken)
class recordMetrics(Callback):
    """Class to record learning rate, loss and accuracy during training.

    After `fit`, the per-epoch histories are available as the list
    attributes `lr`, `losses`, `acc`, `val_losses` and `val_acc`.
    """

    def on_train_begin(self, logs=None):
        # Fresh history containers at the start of every fit.
        # (logs=None instead of logs={}: a mutable default would be shared
        # across calls/instances.)
        self.lr = []
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []

    def on_epoch_end(self, batch, logs=None):
        logs = logs or {}
        # Current learning rate as reported by the optimizer configuration.
        self.lr.append(self.model.optimizer.get_config()['lr'])
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
def make_mosaic(imgs: np.ndarray, nrows: int, ncols: int, border: int = 1) -> np.ndarray:
    """Given a set of images with all the same shape, makes a mosaic.

    Args:
        imgs: array of floats, image data of shape (n, height, width)
        nrows: int, number of rows of mosaic
        ncols: int, number of cols of mosaic
        border: int, border pixels between tiles

    Returns:
        mosaic: masked array, image mosaic (border cells stay masked)
    """
    n_imgs = imgs.shape[0]
    img_h, img_w = imgs.shape[1], imgs.shape[2]
    # Masked array so the border pixels render as "empty" when plotted.
    mosaic = np.ma.masked_all(
        (nrows * img_h + (nrows - 1) * border,
         ncols * img_w + (ncols - 1) * border),
        dtype=np.float32)
    cell_h = img_h + border
    cell_w = img_w + border
    for idx in range(n_imgs):
        # Fill the grid row by row.
        row, col = divmod(idx, ncols)
        mosaic[row * cell_h:row * cell_h + img_h,
               col * cell_w:col * cell_w + img_w] = imgs[idx]
    return mosaic
def classification_heat_map(y_test: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
    """Builds classification heat map (confusion-matrix style counts).

    Args:
        y_test: array of ints, true labels
        y_pred: array of ints, predicted labels

    Returns:
        heat: array of ints, heat[true, predicted] = count
    """
    if len(y_test) == 0:
        return np.zeros((0, 0), dtype='int')
    # Size the matrix by the largest label in either array: the original
    # len(np.unique(y_test)) under-sizes it (and IndexErrors) when labels are
    # not contiguous from 0 or a prediction falls outside the true-label set.
    cat_num = int(max(np.max(y_test), np.max(y_pred))) + 1
    heat = np.zeros((cat_num, cat_num), dtype='int')
    for true_label, pred_label in zip(y_test, y_pred):
        heat[true_label, pred_label] += 1
    return heat
def plot_heat(zero_heat):
    """Plot heat map.

    Displays a true-vs-predicted count matrix with a colorbar; the tick
    ranges assume a 10x10 array (digit classes 0-9) as produced by
    classification_heat_map.
    """
    fig = plt.figure(figsize=(9, 9))
    img = plt.imshow(zero_heat, cmap='viridis')
    plt.xlabel('Predicted', fontsize=FONTSIZE)
    plt.ylabel('True', fontsize=FONTSIZE)
    # One tick per digit class.
    plt.xticks([i for i in range(10)])
    plt.yticks([i for i in range(10)])
    plt.tick_params(axis='both', labelsize=FONTSIZE)
    ax = plt.gca()
    # Give the colorbar its own axis so it does not shrink the heat map.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    cbar = plt.colorbar(img, cax=cax)
    cbar.ax.tick_params(labelsize=FONTSIZE-4)
    cbar.ax.get_xaxis().labelpad = 7
    cbar.ax.set_xlabel('Count', fontsize=FONTSIZE-4)
    cbar.outline.set_visible(False)
    fig.tight_layout()
def plot_weights(weights):
    """Plot weights.

    Shows a 2D array (e.g. a filter mosaic) as an image without axes.
    """
    fig = plt.figure(figsize=(9, 9))
    img = plt.imshow(weights, cmap='viridis')
    plt.tick_params(axis='both', labelsize=FONTSIZE)
    ax = fig.gca()
    ax.axis('off')  # only relative values matter here, hide the axes
    fig.tight_layout()
def plot_layer_outputs(model, layer, X_test: np.ndarray, test_ind: int):
    """Plot layer outputs.

    Runs a single test image through the model and shows the given layer's
    feature maps as a square mosaic.

    Args:
        model: keras model
        layer: keras layer whose output to visualize
        X_test: numpy array, test images
        test_ind: int, input index

    Returns:
        image of layer output
    """
    # Backend function mapping model input -> this layer's output.
    functor = backend.function([model.input] + [backend.learning_phase()], [layer.output])
    # Keep the batch axis: the model expects a 4-D input.
    X = X_test[test_ind, ...][np.newaxis, ...]
    layer_result = np.squeeze(functor([X, 1])[0])
    # Smallest square grid that can hold all the feature maps.
    mosaic_dim = int(np.ceil(np.sqrt(layer_result.shape[0])))
    mosaic = make_mosaic(layer_result, mosaic_dim, mosaic_dim)
    plot_weights(mosaic)
def plot_single_input(ax, X: np.ndarray, y: int, y_pred: int = None, cmap: str = 'gray_r'):
    """Plot single input image.

    Args:
        ax: axis to draw on
        X: numpy array, image
        y: int, true label (shown in the title)
        y_pred: int, predicted label (appended to the title when given)
        cmap: str, colormap to use

    Returns:
        input image
    """
    # squeeze() drops the channel axis so imshow gets a plain 2-D image.
    plt.imshow(np.squeeze(X), cmap=cmap)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    title = 'True {}'.format(y)
    title += ', Predicted {}'.format(y_pred) if y_pred is not None else ''
    plt.title(title, fontsize=FONTSIZE)
def plot_input_images(X: np.ndarray, y: np.ndarray, inds: List[int],
                      y_pred: np.ndarray = None, cmap: str = 'gray_r'):
    """Plot input image(s) in a grid with up to 3 columns.

    Args:
        X: numpy array, input data
        y: numpy array, label data
        inds: list of int, indices to plot
        y_pred: numpy array, predicted labels
        cmap: str, color map to use

    Returns:
        input images
    """
    ncols = min(3, len(inds))
    # int(): np.ceil returns a float, which newer matplotlib versions reject
    # as a subplot grid dimension.
    nrows = int(np.ceil(len(inds) / ncols))
    fig = plt.figure(figsize=(4 * ncols, 4 * nrows))
    for i, ind in enumerate(inds):
        ax = plt.subplot(nrows, ncols, i + 1)
        pred = y_pred[ind] if y_pred is not None else None
        plot_single_input(ax, X[ind, ...], y[ind], pred, cmap)
def get_worst_predictions(y_test: np.ndarray, y_pred: np.ndarray, num: int = 5) -> np.ndarray:
    """Get worst prediction indices.

    Args:
        y_test: numpy array, true integer labels
        y_pred: numpy array, predicted class probabilities (n x classes)
        num: int, number of indices to return

    Returns:
        _: numpy array, `num` indices with the lowest confidence in the
           true class, in ascending order of badness
    """
    # Confidence assigned to the TRUE class; 1 - confidence is the "badness".
    badness = 1 - y_pred[np.arange(len(y_pred)), y_test]
    # The last `num` entries of the ascending sort are the worst offenders.
    return np.argsort(badness)[-num:]
# Load MNIST: 60k training and 10k test 28x28 greyscale images.
(X_train_raw, y_train_raw), (X_test_raw, y_test_raw) = mnist.load_data()
# Add the channel axis and scale pixels to [0, 1].
X_train_prep = reshape_input_data(X_train_raw)
X_test = reshape_input_data(X_test_raw)
# One-hot encode the digit labels (10 classes).
y_train_prep = np_utils.to_categorical(y_train_raw).astype(int)
y_test = np_utils.to_categorical(y_test_raw).astype(int)
# Hold out part of the training set for validation.
(X_train, y_train), (X_val, y_val) = split_val_set_from_train(X_train_prep, y_train_prep, VAL_SET_RATIO)

# look at a couple random digits from the training set
rand_inds = list(np.random.randint(0, high=len(X_train), size=6))
plot_input_images(X_train, np.array([np.argmax(y) for y in y_train]), rand_inds)
```
Above are some examples of the handwritten digits. For each, we have 28x28 pixels with one color channel (greyscale). I separated the data into training, validation and test sets - taking a portion of the samples from the training set for the validation set to use for model architecture selection and parameter tuning. The samples in the test set will be held out for final model evaluation.
As this is just an exploratory project, I wanted to manually explore the model architecture space rather than use any sort of automated search. I monitored the accuracy on my validation set while making changes.
```
# model architecture
DENSE_LAYERS = 1
DENSE_NODES = 256 # generally keep between input and output sizes
ACTIVATION = 'relu'
CONV_LAYERS = 2
FILTERS = 16  # number of convolution filters per conv layer
PATCHES = 3   # convolution kernel size (3x3)

img_dim = X_train.shape[-1]
inputs = layers.Input(shape=(1, img_dim, img_dim), name='inputs')

# convolutional layers with max pooling after each
for lay_i in range(CONV_LAYERS):
    # The first conv layer consumes the input tensor; later ones chain on x.
    layer_in = inputs if lay_i == 0 else x
    x = layers.Conv2D(FILTERS, (PATCHES, PATCHES), activation=ACTIVATION,
                      data_format='channels_first', name='conv{}'.format(lay_i))(layer_in)
    x = layers.MaxPooling2D(pool_size=(2, 2), name='pool{}'.format(lay_i))(x)

# dropout if there was a convolutional layer before flatten for dense input
if CONV_LAYERS > 0:
    x = layers.Dropout(0.2, name='conv_dropout')(x)
    x = layers.Flatten(name='flatten')(x)
else:
    x = layers.Flatten(name='flatten')(inputs)

# dense layers with dropout after each
for lay_i in range(DENSE_LAYERS):
    # Shrink the node count (256, 128, ...) for each additional dense layer.
    dense = int(DENSE_NODES/(lay_i+1))
    x = layers.Dense(dense, activation=ACTIVATION, name='dense{}'.format(lay_i))(x)
    x = layers.Dropout(0.25, name='dense_dropout{}'.format(lay_i))(x)

# 10-way softmax classifier head, one unit per digit.
output = layers.Dense(10, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
```
I used Keras' functional API for this neural network because it provides a straightforward way to build up a model in a dynamic way. First, let's take a look at the model I landed on.
```
# Print a per-layer summary (output shapes and parameter counts).
model.summary()
```
There's a total of 10 layers, summarized below:
1. Input image
2. First convolutional layer, detects low level shapes
3. Max pooling layer down samples and provides invariance to translation, rotation and scale
4. Second convolutional layer, detects higher level features
5. Max pooling layer down samples and provides invariance to translation, rotation and scale
6. Dropout layer mitigates overfitting
7. Flatten layer gets the data in the right shape for dense layers
8. Dense layer
9. Dropout layer mitigates overfitting
10. Output layer with softmax activation acts as a classifier
Deciding on a model structure and tuning the parameters is an iterative process. I set up an early stopping callback for both the training and validation data, so that if the accuracy of either one stalled it would stop the fit. I also defined a simple step style learning rate scheduler that cuts the learning rate in half every couple epochs. This helps the optimizer continue to improve the model's accuracy as the fitting process wears on. I recorded the training and validation data losses and accuracy along with the learning rate every epoch, and at the end of the fit I plotted a simple visualization to see how training went.
The main thing I was looking for after fitting the model is how well the validation set accuracy tracked the training set accuracy. If the training accuracy is well above the validation accuracy, then we know we're likely overfitting.
```
# Stop early if training or validation accuracy stalls for 3 epochs.
early_stop_train = EarlyStopping(monitor='acc', min_delta=0.001, patience=3,
                                 verbose=0, mode='auto')
early_stop_test = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=3,
                                verbose=0, mode='auto')
# Step-decay schedule defined by lr_function (halves every 5 epochs).
lr_scheduler = LearningRateScheduler(lr_function)
metrics = recordMetrics()

EPOCHS = 20
history = model.fit(X_train, y_train, epochs=EPOCHS, verbose=1,
                    callbacks=[metrics, early_stop_train, early_stop_test, lr_scheduler],
                    validation_data=(X_val, y_val))
visualize_fit(metrics)
```
As you can see, it only took a couple epochs to get above 99% accuracy on both the training and validation sets. By lowering the learning rate every so often, we're helping the model get incrementally better without bouncing around. Importantly, the validation accuracy tracked with the training accuracy. I'm happy with the final model here - 99.33% accuracy on the held out test set isn't too shabby.
Let's take a closer look at the model's performance.
```
# Class probabilities for the held-out test set.
y_pred = model.predict(X_test)
# Most likely digit per sample.
y_pred_max = np.array([np.argmax(pred) for pred in y_pred])
scores = model.evaluate(x=X_test, y=y_test)
print('Accuracy = {:.2%}'.format(scores[1]))

# Confusion counts; zero the diagonal so only misclassifications show.
heat = classification_heat_map(y_test_raw, y_pred_max)
miss_heat = heat.copy()
np.fill_diagonal(miss_heat, val=0)
plot_heat(miss_heat)
```
The y-axis of the heat map is the true digit and the x-axis is the predicted digit, with the color of the square corresponding to the number of samples for that pairing. Clearly, there were quite a few instances where we thought `9`s were `4`s, `7`s were `2`s, and `3`s were `5`s. These mixups are not unexpected, so that's a good sign the model isn't confusing numbers with significantly different shapes.
Let's look at the digits we predicted incorrectly **and** had a high confidence about.
```
# Show the most confidently wrong predictions.
worst_inds = get_worst_predictions(y_test_raw, y_pred, num=6)
plot_input_images(X_test, y_test_raw, list(worst_inds), y_pred_max, cmap='viridis')
```
I mean.. some of these are pretty poorly written digits. Others, like the lower row, middle column `6` does not look like a `5` like was predicted (even though it's a better `G` than a `6`). Depending on the application, missing digits like these may or may not be acceptable.
Time to take a closer look at what the model is doing.
```
test_ind = 0
# Look up layers by name to inspect the first convolutional layer.
layer_info = {layer.name: layer for layer in model.layers}
layer = layer_info['conv0']
layer_weights = layer.get_weights()
# layer_weights[0] holds the kernels; collect one 2-D filter per output channel.
filter_weights = np.array([np.squeeze(layer_weights[0][..., i]) for i in range(FILTERS)])
# Smallest square grid that fits all the filters.
mosaic_dim = int(np.ceil(np.sqrt(filter_weights.shape[0])))
weights_mosaic = make_mosaic(filter_weights, mosaic_dim, mosaic_dim)
plot_weights(weights_mosaic)
```
These are the weights of the filters for the first convolutional layer (I left out the axes and colorbar because only the relative values matter for right now). See [this great article](http://colah.github.io/posts/2014-07-Understanding-Convolutions/) for an explanation of convolutions, but in simple terms, imagine each of these filters sliding over each spot on the image. Where the filter and the image match up well, the output of the layer will "light up." Technically, "light up" is a garbage way to describe it, but you'll see why I said that coming up. Next, we'll show a selected digit (the input to our model) and then the output of our convolutional layer.
```
# show test image and conv filter outputs
plot_input_images(X_test, y_test_raw, [test_ind], y_pred_max, cmap='viridis')
plot_layer_outputs(model, layer, X_test, test_ind)
```
The filters activate the input image in different ways. Although each input image will activate the filters differently, *similar inputs will activate the same filters in similar locations*. Although I am aware of the flaws of max pooling[^1], it is, for better or worse, effective for making the neural network invariant to image translation, scaling and orientation.
The techniques used for this problem can be applied to a range of image recognition tasks. Experimenting with model architectures is often necessary to get good results, but there are a variety of resources out there to suggest architectures that are well suited to certain problem types.
[^1]: Geoffrey Hinton argues "The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster."
| github_jupyter |
# Machine Learning and Statistics for Physicists
Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/).
Content is maintained on [github](https://github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause).
##### ► [View table of contents](Contents.ipynb)
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
import pandas as pd
```
## Tensor Computing
Most practical algorithms of ML can be decomposed into small steps where the calculations are expressed with linear algebra, i.e., linear combinations of scalars, vectors and matrices.
For example, a neural network can be built from layers that each calculate
$$
\mathbf{x}_\text{out} = \max(0, W \mathbf{x}_\text{in} + \mathbf{b}) \; ,
$$
where $W$ is a matrix, and boldface symbols represent vectors. In typical applications, $\mathbf{x}_\text{out}$ and $\mathbf{x}_\text{in}$ are derived from **data** while $W$ and $\mathbf{b}$ are considered **model parameters**. (This expression is not strictly linear: why?)
The python numeric and list types can represent arbitrary scalars, vectors, and matrices, but are designed for flexibility instead of efficiency.
Numpy is instead optimized for the special case where all list elements are numeric values of the same type, which can be organized and accessed very efficiently in memory, with a specialized array type with lots of nice features. One downside of this approach is that most of builtin math functions are duplicated (e.g., `math.sin` and `np.sin`) to work with numpy arrays.
**EXERCISE:** Complete the function below using numpy to evaluate the neural-network layer defined above:
```
def xout(W, xin, b):
    """Evaluate one ReLU neural-network layer: elementwise max(0, W @ xin + b).

    W   : weight matrix.
    xin : input vector.
    b   : bias vector.
    Returns the rectified pre-activation as a numpy array.
    """
    preactivation = W.dot(xin) + b
    # np.maximum broadcasts the scalar 0 and compares elementwise (ReLU);
    # the builtin max would instead reduce everything to a single scalar.
    return np.maximum(0, preactivation)
```
### Terminology
We frequently use $\mathbf{r} = (x, y, z)$ in physics to represent an *arbitrary* position in three (continuous) dimensions.
In numpy, we cannot represent an *arbitrary* position but can easily represent a *specific* position, for example:
```
rvec = np.array([0.1, -0.2, 0.3])
```
However, `rvec` has only one (discrete) dimension, which we use to access its three array elements with indices 0,1,2:
```
rvec[0], rvec[1], rvec[2]
```
Note how we use the term **dimension** differently in these two cases!
All numpy arrays have a `shape` property that specifies the range of indices allowed for each of their (discrete) dimensions:
```
rvec.shape
rvec.ndim
```
Compare with a matrix represented in numpy:
```
matrix = np.identity(3)
print(matrix)
matrix[1, 0], matrix[1, 1]
matrix.shape
matrix.ndim
```
Numpy supports arrays with any (finite) number of (discrete) dimensions. The general name for these arrays is a **tensor** (so, scalars, vectors and matrices are tensors). For example:
```
tensor = np.ones((2, 3, 4))
print(tensor)
tensor[0, 0, 0], tensor[1, 2, 3]
tensor.shape
tensor.ndim
```
Tensors are used in physics also: for example, the tensor expression $g^{il} \Gamma^m_{ki} x^k$ arises in [contravariant derivatives in General Relativity](https://en.wikipedia.org/wiki/Christoffel_symbols#Covariant_derivatives_of_tensors). What are the **dimensions** of $g$, $\Gamma$ and $x$ in this expression? Note that numpy tensors do not make any distinction between upper or lower indices.
The numpy dimension is sometimes also referred to as the **rank**, but note that [array rank](https://en.wikipedia.org/wiki/Rank_(computer_programming)) is similar to but subtly different from [linear algebra rank](https://en.wikipedia.org/wiki/Rank_(linear_algebra)).
### Fundamental Operations
#### Tensor Creation
The most common ways you will create new arrays are:
- Filled with a simple sequence of constant values
- Filled with (reproducible) random values
- Calculated as a mathematical function of existing arrays.
```
# Regular sequence of values
shape = (3, 4)
c1 = np.zeros(shape)
c2 = np.ones(shape)
c3 = np.full(shape, -1)
c4 = np.arange(12)
# Reproducible "random" numbers
gen = np.random.RandomState(seed=123)
r1 = gen.uniform(size=shape)
r2 = gen.normal(loc=-1, scale=2, size=shape)
# Calculated as function of existing array.
f1 = r1 * np.sin(r2) ** c3
```
All the values contained within a tensors have the same [data type](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.types.html), which you can inspect:
```
c1.dtype, c4.dtype
```
**EXERCISE:** Try to guess the `dtype` of `c3`, `r2` and `f1`, then check your answer. Deep learning often uses smaller (32 or 16 bit) float data types: what advantages and disadvantages might this have?
```
print(c3.dtype=='int')
print(r2.dtype=='float')
print(f1.dtype=='float')
```
<span style="color:blue">
GPUs have a special memory architecture that is expensive, so we ARE memory constrained.
<br/><br/>
Also, by making the data more coarse, we can insert some stochasticity, the usefulness of which the lecture slides point to.
</span>
**SOLUTION:** The `zeros` and `ones` functions default to `float64`, but `full` uses the type of the provided constant value. Integers are automatically promoted to floats in mixed expressions.
```
c3.dtype, r2.dtype, f1.dtype
```
Smaller floats allow more efficient use of limited (GPU) memory and faster calculations, at the cost of some accuracy. Since the training of a deep neural network is inherently noisy, this is generally a good tradeoff.
#### Tensor Reshaping
It is often useful to [reshape](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.reshape.html) a tensor without changing its total size, which can be done very efficiently since the layout of the tensor values in memory does not need to be changed:
```
c4.reshape(c1.shape)
```
**EXERCISE:** Predict the result of `c4.reshape(2, 3, 2)` then check your answer.
```
c4.reshape(2, 3, 2)
```
#### Tensor Broadcasting
The real power of tensor computing comes from expressions like this:
```
# Add one to each element.
c4 + 1
# Scale each column of the 3x4 ones matrix by a different value.
np.ones(shape=(3, 4)) * np.arange(4)
```
The results are not surprising in these examples, but something non-trivial is going on behind the scenes to make this work since we are combining tensors with different shapes. This is called [broadcasting](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html) and has specific rules for how to handle less obvious cases.
Broadcasting serves two purposes:
- It allows more compact and easier to understand "vectorized" expressions, where loops over elements in each dimension are implicit.
- It enables automatic optimizations to take advantage of the available hardware, since explicit python loops are generally a bottleneck.
Not all expressions can be automatically broadcast, even if they seem to make sense. For example:
```
# Scale each row of the 3x4 ones matrix by a different value.
try:
np.ones(shape=(3, 4)) * np.arange(3)
except ValueError as e:
print(e)
```
However, you can usually reshape the inputs to get the desired result:
```
np.ones(shape=(3, 4)) * np.arange(3).reshape(3, 1)
```
Another useful trick is to use `keepdims=True` with reducing functions, e.g.
```
print(np.ones((4, 3)).sum(axis=1))
print(np.ones((4, 3)).sum(axis=1, keepdims=True))
```
To experiment with broadcasting rules, define a function to try broadcasting two arbitrary tensor shapes:
```
def broadcast(shape1, shape2):
    """Try adding two all-ones arrays of the given shapes and print whether
    (and how) numpy broadcasts them together."""
    lhs = np.ones(shape1)
    rhs = np.ones(shape2)
    try:
        combined = lhs + rhs
    except ValueError as e:
        # Incompatible shapes: numpy's error message explains the failure.
        print(e)
    else:
        print('shapes {} {} broadcast to {}'.format(shape1, shape2, combined.shape))
broadcast((1, 3), (3,))
broadcast((1, 2), (3,))
```
**EXERCISE:** Predict the results of the following then check your answers:
```
broadcast((3, 1, 2), (3, 2))
broadcast((2, 1, 3), (3, 2))
broadcast((3,), (2, 1))
broadcast((3,), (1, 2))
broadcast((3,), (1, 3))
```
```
broadcast((3, 1, 2), (3, 2))
broadcast((2, 1, 3), (3, 2))
broadcast((3,), (2, 1))
broadcast((3,), (1, 2))
broadcast((3,), (1, 3))
```
### Tensor Frameworks
#### Numpy
Numpy is an example of a framework for tensor computing that is widely supported and requires no special hardware. However, it still offers significant performance improvements by eliminating explicit python loops and using memory efficiently.
For example, let's calculate the opening angle separation between two unit vectors, each specified with (lat, lon) angles in radians (or RA,DEC for astronomers, as implemented [here](https://desisurvey.readthedocs.io/en/latest/api.html#desisurvey.utils.separation_matrix)). The [Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula) is a good way to calculate this quantity.
Generate a large number of random unit vectors for benchmarking (are these uniformly distributed on the sphere?)
```
def generate(N, seed=123):
    """Draw N random (lat, lon) pairs in radians, scatter-plot them, and
    return the two arrays.

    Bug fix: the RandomState previously hard-coded seed=123, so the ``seed``
    parameter was silently ignored; it is now honored.

    NOTE(review): sampling latitude uniformly is NOT uniform on the sphere
    (points cluster toward the poles); uniform-on-sphere sampling would draw
    sin(lat) uniformly instead — this matches the question posed in the text.
    """
    gen = np.random.RandomState(seed=seed)
    lats = gen.uniform(low=-np.pi / 2, high=+np.pi / 2, size=N)
    lons = gen.uniform(low=0, high=2 * np.pi, size=N)
    plt.plot(lons, lats, '.')
    return lats, lons
lats, lons = generate(N=1000)
lats.shape
```
Use explicit python loops to calculate the (square) matrix of separation angles between all pairs of unit vectors:
```
def separation_matrix_loops(lats_in=None, lons_in=None):
    """Compute the N x N matrix of opening angles (radians) between all pairs
    of unit vectors, using explicit python loops and the Haversine formula.

    Generalized: accepts optional latitude/longitude arrays (radians); when
    omitted it falls back to the module-level ``lats``/``lons``, so the
    original zero-argument call still works.
    """
    if lats_in is None:
        lats_in = lats
    if lons_in is None:
        lons_in = lons
    # Allocate memory for the matrix.
    N = len(lats_in)
    matrix = np.empty((N, N))
    for i, (lat1, lon1) in enumerate(zip(lats_in, lons_in)):
        for j, (lat2, lon2) in enumerate(zip(lats_in, lons_in)):
            # Evaluate the Haversine formula for matrix element [i, j].
            matrix[i, j] = 2 * np.arcsin(np.sqrt(
                np.sin(0.5 * (lat2 - lat1)) ** 2 +
                np.cos(lat1) * np.cos(lat2) * np.sin(0.5 * (lon2 - lon1)) ** 2))
    return matrix
%time S1 = separation_matrix_loops()
```
Now calculate the same separations using numpy implicit loops:
```
def separation_matrix_numpy(lats_in=None, lons_in=None):
    """Vectorized Haversine separation matrix (radians) between all pairs.

    Generalized to accept optional latitude/longitude arrays (radians),
    defaulting to the module-level ``lats``/``lons`` so the original
    zero-argument call keeps working. Broadcasting a row vector against a
    column vector produces the full N x N matrix with no python loops.
    """
    if lats_in is None:
        lats_in = lats
    if lons_in is None:
        lons_in = lons
    lat1, lat2 = lats_in, lats_in.reshape(-1, 1)
    lon1, lon2 = lons_in, lons_in.reshape(-1, 1)
    return 2 * np.arcsin(np.sqrt(
        np.sin(0.5 * (lat2 - lat1)) ** 2 +
        np.cos(lat1) * np.cos(lat2) * np.sin(0.5 * (lon2 - lon1)) ** 2))
```
Check that both calculations give the same results:
```
np.allclose(S1, S2)
```
Since this is so much faster, increase the amount of computation (and memory) 100x for a better benchmark:
```
lats, lons = generate(N=10000)
%time S2 = separation_matrix_numpy()
```
Therefore using implicit numpy loops speeds up the calculation by a factor of about 6.8 / 0.02 = 340. Since we are using the efficient numpy arrays in both cases, the speed up is entirely due to the loops!
#### Other Frameworks: PyTorch and TensorFlow
Machine learning relies heavily on frameworks that copy the successful numpy design for tensor computing, while adding some important new features:
- Automatic hardware acceleration.
- Automatic calculation of derivatives.
- Efficient deployment to other platforms (mobile, cloud).
Unlike numpy, the default type in these frameworks is usually a 32-bit float, rather than a 64-bit float.
The two most popular tensor computing frameworks for machine learning today are [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/). Both are large open-source projects, primarily developed by Facebook (pytorch) and Google (tensorflow). These frameworks were originally quite different, with pytorch preferred for research and tensorflow preferred for large-scale deployment, but they are gradually converging towards a similar feature set.
Below, we repeat our calculation of the separation matrix with both of these frameworks. You will notice that the new features come with some additional complexity.
#### PyTorch Example
```
import torch
device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
print(f'Using device: {device}.')
lons_pt = torch.tensor(lons, device=device)
lats_pt = torch.tensor(lats, device=device)
def separation_matrix_torch(lats_in=None, lons_in=None):
    """Haversine separation matrix computed with PyTorch tensor ops.

    Generalized to accept optional latitude/longitude tensors (radians),
    defaulting to the module-level ``lats_pt``/``lons_pt`` so the original
    zero-argument call keeps working. Same broadcasting pattern as the numpy
    implementation, but runs on whatever device the tensors live on.
    """
    if lats_in is None:
        lats_in = lats_pt
    if lons_in is None:
        lons_in = lons_pt
    lat1, lat2 = lats_in, lats_in.reshape(-1, 1)
    lon1, lon2 = lons_in, lons_in.reshape(-1, 1)
    return 2 * torch.asin(torch.sqrt(
        torch.sin(0.5 * (lat2 - lat1)) ** 2 +
        torch.cos(lat1) * torch.cos(lat2) * torch.sin(0.5 * (lon2 - lon1)) ** 2))
%time S3 = separation_matrix_torch()
np.allclose(S2, S3.numpy())
```
#### TensorFlow Example
```
import tensorflow as tf
device = 'GPU:0' if tf.config.list_physical_devices('GPU') else 'CPU:0'
print(f'Using device: {device}.')
with tf.device(device):
lons_tf = tf.constant(lons)
lats_tf = tf.constant(lats)
def separation_matrix_tensorflow():
    """Haversine separation matrix computed with TensorFlow ops.

    Broadcasts the 1-D module-level ``lats_tf``/``lons_tf`` tensors against
    column-vector versions of themselves to form the full N x N matrix of
    opening angles (radians) between all pairs of unit vectors.
    """
    lat_row = lats_tf
    lat_col = tf.expand_dims(lats_tf, 1)
    lon_row = lons_tf
    lon_col = tf.expand_dims(lons_tf, 1)
    half_dlat = 0.5 * (lat_col - lat_row)
    half_dlon = 0.5 * (lon_col - lon_row)
    haversine = (tf.sin(half_dlat) ** 2 +
                 tf.cos(lat_row) * tf.cos(lat_col) * tf.sin(half_dlon) ** 2)
    return 2 * tf.asin(tf.sqrt(haversine))
%time S4 = separation_matrix_tensorflow()
np.allclose(S2, S4.numpy())
```
#### Hardware Acceleration
Tensor computing can be sped up significantly (10-100x) using hardware that is optimized to perform tensor computing by distributing simple calculations ("kernels") across many independent processors ("cores") running in parallel.
The original driver for such hardware was to accelerate the 3D geometry calculations required to render real time 3D graphics, leading to the first [Graphics Processing Units (GPUs)](https://en.wikipedia.org/wiki/Graphics_processing_unit) in the 1990s. More recently, GPUs have been adopted for purely numerical calculations, with no display attached, leading to the development of specialized programming languages such as [CUDA](https://en.wikipedia.org/wiki/CUDA) and [OpenCL](https://en.wikipedia.org/wiki/OpenCL).
Currently, one vendor (Nvidia) dominates the use of GPUs for ML with its proprietary CUDA language. Google has also introduced an even more specialized [TPU](https://en.wikipedia.org/wiki/Tensor_processing_unit) architecture.
The table below shows some benchmarks for the separation matrix problem, running on different hardware with different frameworks. The speed ups obtained using PyTorch and TensorFlow with a GPU are typical. The two frameworks provide comparable GPU performance overall, but can differ on specific problems.
| Test | Laptop |Server(GPU) | Collab(CPU) | Collab(GPU) |
|------------|--------|------------|-------------|-------------|
| numpy | 2.08s | 1.17s | 10.5s | 10.3s |
| torch | 7.32s | 48.7ms | --- | --- |
| tensorflow | --- | --- | 9.11s | 246ms |
| ratio | 3.5 | 1 / 24 | 0.87 | 1 / 41 |
To benefit from this hardware, you can either add a GPU to a linux server, or use a cloud computing platform.
Cloud computing is the easiest way to get started. There are some free options, but generally you have to "pay as you go" to do a useful amount of work. Some good starting points are:
- [Google Colaboratory](https://colab.research.google.com/): free research tool with a jupyter notebook front end.
- [PaperSpace](https://www.paperspace.com/): reasonably priced and simple to get started.
- [Amazon Web Services](https://aws.amazon.com/ec2/): free to try, very flexible and relatively complex.
- [Google Cloud](https://cloud.google.com/): comparable to AWS.
**Note: this is not a complete list, and pricing and capabilities are rapidly changing.**
If you are considering building your own GPU server, start [here](http://timdettmers.com/2018/11/05/which-gpu-for-deep-learning/). A single server can host 4 GPUs. Here is a single water-cooled [RTX 2080 Ti](https://www.nvidia.com/en-us/geforce/graphics-cards/rtx-2080-ti/) GPU installed in my office:

### Automatic Derivatives
In addition to hardware acceleration, a key feature of tensor computing frameworks for ML is their ability to automate the calculation of derivatives, which then enable efficient and accurate gradient-based optimization algorithms.
In general, a derivative can be implemented in software three ways:
- Analytically (using paper or mathematica) then copied into code: this is the most efficient and accurate but least generalizable.
- Numerically, with [finite difference equations](https://en.wikipedia.org/wiki/Finite_difference): this is the least efficient and accurate, but most generalizable.
- [Automatically](https://en.wikipedia.org/wiki/Automatic_differentiation): a hybrid approach where a small set of primitive functions (sin, cos, log, ...) are handled analytically, then the derivatives of expressions using these primitives are computed on the fly using the chain rule, product rule, etc. This is efficient and accurate, but requires that expressions are built entirely from primitives that support AD.
As a concrete example calculate the (un-normalized) Gaussian distribution
$$
y(x) = e^{-x^2}
$$
in PyTorch:
```
x = torch.linspace(-5, 5, 20, requires_grad=True)
y = torch.exp(-x ** 2)
x
y
```
We specify `requires_grad=True` to enable AD for all tensors that depend on `x` (so just `y` in this case). To calculate partial derivatives ("gradients") of `y` wrt `x`, use:
```
y.backward(torch.ones_like(y))
```
The tensor `x.grad` now contains $y'(x)$ at each value of `x`:
```
x.grad
x_n = x.detach().numpy()
yp_n = x.grad.detach().numpy()
y_n = y.detach().numpy()
plt.plot(x_n, y_n, 'o--', label='$y(x)$')
plt.plot(x_n, yp_n, 'o:', label='$y^\prime(x)$')
plt.legend();
```
Note that these derivatives are calculated to full machine precision and not affected by the coarse spacing in $x$.
[Jax](https://github.com/google/jax) is a relatively new framework for automatic differentiation (developed by google but independent of tensorflow) that relies on "just-in-time" compilation and is designed for ML research.
### Higher-Level APIs for Tensor Computing
Although TensorFlow and PyTorch are both similar to numpy, they have different APIs so you are forced to choose one to take advantage of their unique features. However, for many calculations they are interchangeable, and a new ecosystem of higher-level APIs is growing to support this. For example, check out:
- [Tensorly](http://tensorly.org/stable/index.html): "*Tensor learning in python*". Includes powerful [decomposition](https://arxiv.org/abs/1711.10781) (generalized PCA) and regression algorithms.
- [einops](https://github.com/arogozhnikov/einops): "*Deep learning operations reinvented*". Supports compact expressions for complex indexing operations ([np.einsum](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html) on steroids).
Neither of these packages are included in the MLS conda environment, but I encourage you to experiment with them if you want to write framework-independent tensor code.
| github_jupyter |
# Plot Earth-Relative Atmospheric Angular Momentum
#### This notebook plots daily earth-relative atmospheric angular momentum (AAM) calculated using data from the 20th Century Reanalysis Project Version 3 (see AAM_Calculation_20CR.ipynb).
#### Import the necessary libraries.
```
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import ipywidgets
from ipywidgets import interact, interactive
from datetime import datetime, timedelta
```
#### Read in the 1836-2015 AAM dataset using Xarray.
```
ds = xr.open_dataset('/home/scratch/20CR_v3/daily_aam_1836_2015.nc')
ds
```
### Plot the full time series of daily global Earth-relative AAM (1836-2015).
```
# Plot total AAM for the full 20CR period using the Xarray plotting function
ds.Mr.plot(figsize=(16,10))
plt.title('Global Earth-Relative AAM: 1836-2015 ', fontsize=20)
plt.xlabel('Date', fontsize=15)
plt.ylabel("$M_R$"+' '+"$(kg \cdot m^2 \cdot s^{-1})$", fontsize=15)
plt.show()
```
#### Create a plot of global earth-relative AAM for a selected time period. Select desired start and end times between 01-01-1836 and 12-31-2015.
```
start = ipywidgets.DatePicker(description = 'Start Date')
end = ipywidgets.DatePicker(description = 'End Date')
display(start, end)
def Plot_Mr():
    """Plot daily global earth-relative AAM for the widget-selected dates.

    Reads the module-level ``start``/``end`` DatePicker widgets and the
    global ``ds`` dataset; displays a time-series figure as a side effect.
    """
    # Nanosecond-precision timestamps matching the dataset's time coordinate.
    t0 = f'{start.value}T00:00:00.000000000'
    t1 = f'{end.value}T00:00:00.000000000'
    # Restrict Mr to the requested date range.
    selected = ds.Mr.sel(time=slice(t0, t1))
    # One tick-friendly date per day in the range.
    dates = pd.date_range(t0, t1)
    plt.figure(figsize=(12, 8))
    plt.plot(dates, selected)
    plt.title(f'Global Earth-Relative AAM: {start.value} to {end.value}', fontsize=20)
    plt.xlabel('Date', fontsize=15)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.ylabel("$M_R$"+' '+"$(kg \cdot m^2 \cdot s^{-1})$", fontsize=15)
    plt.show()
# Call the Plot_Mr function to create the time series of Mr
Plot_Mr()
```
### Plot the standardized anomalies of daily global Earth-relative AAM (1836-2015).
```
# Plot the standardized anomalies of global Mr for the full 20CR period
ds.Mr_stdanom.plot(figsize=(12,8), linewidth=0.5)
plt.title('Standardized Anomalies of Global Earth-Relative AAM: 1836-2015', fontsize=20)
plt.axhline(y=0.0, color='black', linestyle='-', linewidth=0.5)
plt.xticks(fontsize=12)
plt.xlabel('Date', fontsize=15)
plt.yticks(fontsize=12)
plt.ylabel("$M_R$ Standardized Anomalies"+' '+"$(kg \cdot m^2 \cdot s^{-1})$", fontsize=15)
plt.show()
```
### Plot Earth-relative AAM by latitude.
```
# Get all the dates in the dataset
date = ds.time.values
# The plotter function plots the relative AAM by latitude (Mr_by_lat) for each day in the dataset
def Plotter(date=date):
    """Plot relative AAM by latitude for the given day.

    NOTE(review): the default binds the module-level ``date`` array at
    definition time; when driven by ``ipywidgets.interactive`` a single
    timestamp from that array is passed instead — confirm with the widget cell.
    """
    day = ds.sel(time=date)
    fig = plt.figure(figsize=(12, 8))
    # x-axis = latitude, y-axis = relative AAM by latitude.
    plt.plot(day.lat, day.Mr_by_lat, linewidth=3)
    plt.title('Daily $M_R$ by Latitude', fontsize=20)
    plt.xlim((-90., 90.))
    plt.xticks(np.arange(-90, 100, 20))
    plt.xlabel('$\phi$', fontsize=14)
    # Fix the y-range across all days so frames are visually comparable.
    plt.ylim(ds.Mr_by_lat.min(), ds.Mr_by_lat.max())
    plt.ylabel("$M_R$"+' '+"$(kg \cdot m^2 \cdot s^{-1})$", fontsize=14)
```
#### Create an IPython Widget to make an interactive plot. Use the drop down menu to select any date from 1836 to 2015.
```
# ipywidgets interactive function enables an interactive user interface; display the widget
w = interactive(Plotter)
display(w)
```
| github_jupyter |
# *Data Visualization and Statistics*
Gallery of Matplotlib examples: [https://matplotlib.org/gallery.html](https://matplotlib.org/gallery.html)
```
## First, let's import some packages.
import os
from pprint import pprint
from textblob import TextBlob
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# The line above tells Jupyter to display Matplotlib graphics within the notebook.
## Download sample text corpora from GitHub, then unzip.
os.chdir('/sharedfolder/')
!wget -N https://github.com/pcda17/pcda17.github.io/blob/master/week/8/Sample_corpora.zip?raw=true -O Sample_corpora.zip
!unzip -o Sample_corpora.zip
os.chdir('/sharedfolder/Sample_corpora')
os.listdir('./')
!ls Jane_Austen
!ls Herman_Melville
## Loading a Melville novel as a TextBlob object
melville_path = 'Herman_Melville/Moby_Dick.txt'
melville_blob = TextBlob(open(melville_path).read().replace('\n', ' '))
## Loading an Austen novel as a TextBlob object
austen_path = 'Jane_Austen/Pride_and_Prejudice.txt'
austen_blob = TextBlob(open(austen_path).read().replace('\n', ' '))
## Recall that 'some_textblob_object.words' is a WordList object ...
melville_blob.words[5100:5140]
# ... which we can cast to an ordinary list.
list(melville_blob.words[5100:5140])
## And 'some_textblob_object.sentences' is a list of Sentence objects ...
austen_blob.sentences[100:105]
# ... which we can convert to a list of strings using a list comprehension.
[str(item) for item in austen_blob.sentences[100:105]]
## For reference, here's another example of a list comprehension:
word_list = ['Call', 'me', 'Ishmael.']
uppercase_list = [word.upper() for word in word_list]
uppercase_list
## And one more for good measure:
string_nums = [str(i) for i in range(12)]
string_nums
```
### ▷ Sentiment analysis with TextBlob
Details on the training data that NLTK (via TextBlob) uses to measure polarity:
[http://www.cs.cornell.edu/people/pabo/movie-review-data/](http://www.cs.cornell.edu/people/pabo/movie-review-data/)
```
## Negative sentiment polarity example
# (result between -1 and +1)
from textblob import TextBlob
text = "This is a very mean and nasty sentence."
blob = TextBlob(text)
sentiment_score = blob.sentiment.polarity
print(sentiment_score)
## Positive sentiment polarity example
# (result between -1 and +1)
text = "This is a very nice and positive sentence."
blob = TextBlob(text)
sentiment_score = blob.sentiment.polarity
print(sentiment_score)
## Neutral polarity / not enough information
text = "What is this?"
blob = TextBlob(text)
sentiment_score = blob.sentiment.polarity
print(sentiment_score)
## High subjectivity example
# result between 0 and 1
text="This is a very mean and nasty sentence."
blob = TextBlob(text)
sentiment_score = blob.sentiment.subjectivity
print(sentiment_score)
## Low subjectivity example
# result between 0 and 1
text="This sentence states a fact, with an apparently objective adjective."
blob = TextBlob(text)
sentiment_score=blob.sentiment.subjectivity
print(sentiment_score)
```
### ▷ Plotting Sentiment Values
Let's map sentiment polarity values across the course of a full novel.
```
## Viewing Pyplot style templates
pprint(plt.style.available)
## Selecting a Pyplot style
plt.style.use('ggplot')
# The 'ggplot' style imitates the R graphing package 'ggplot2.' (http://ggplot2.org)
austen_sentiments = [item.sentiment.polarity for item in austen_blob.sentences]
austen_sentiments[:15]
## Austen sentiment values for first 60 sentences
plt.figure(figsize=(18,8))
plt.plot(austen_sentiments[:60])
austen_blob.sentences[30]
austen_blob.sentences[37]
## Plotting 'Pride and Prejudice' sentence sentiment values over full novel
plt.figure(figsize=(18,8))
plt.plot(austen_sentiments)
plt.show()
## Finding the most 'positive' sentences in 'Pride and Prejudice' and printing them
max_sentiment = max(austen_sentiments)
print(max_sentiment) # max sentiment polarity value
print()
for sentence in austen_blob.sentences:
if sentence.sentiment.polarity == max_sentiment:
print(sentence)
print()
## Finding the most 'negative' sentences in 'Pride and Prejudice' and printing them
min_sentiment = min(austen_sentiments)
print(min_sentiment) # min sentiment polarity value
print()
for sentence in austen_blob.sentences:
if sentence.sentiment.polarity == min_sentiment:
print(sentence)
print()
## Example: smoothing a list of numbers using the 'pandas' package
some_values = [5, 4, 5, 6, 6, 7, 6, 19, 4, 4, 3, 3, 3, 1, 5, 5, 6, 7, 0]
pandas_series = pd.Series(some_values)
list(pandas_series.rolling(window=4).mean())
## Smoothing our data before plotting
austen_sentiments_pd = pd.Series(austen_sentiments)
austen_sentiments_smooth = austen_sentiments_pd.rolling(window=200).mean()
print(austen_sentiments_smooth[190:220])
## Plotting smoothed sentiment polarity values for each sentence in 'Pride and Prejudice'
plt.figure(figsize=(18,8))
plt.plot(austen_sentiments_smooth)
plt.show()
## Comparing 'Moby Dick' sentiment values
melville_sentiments = [item.sentiment.polarity for item in melville_blob.sentences]
melville_sentiments_pd = pd.Series(melville_sentiments)
melville_sentiments_smooth = melville_sentiments_pd.rolling(window=200).mean()
plt.figure(figsize=(18,8))
plt.plot(melville_sentiments_smooth)
plt.show()
## Finding and printing the most 'negative' sentence in a list of smoothed sentiment values
min_sentiment = min(melville_sentiments_smooth[199:])
print(min_sentiment) # min sentiment polarity value
print()
min_sentiment_index = list(melville_sentiments_smooth).index(min_sentiment) # index position of the 'min_sentiment' value
print(melville_blob.sentences[min_sentiment_index])
## Finding and printing the most 'positive' sentence in a list of smoothed sentiment values
max_sentiment = max(melville_sentiments_smooth[199:])
print(max_sentiment) # max sentiment polarity value
print()
max_sentiment_index = list(melville_sentiments_smooth).index(max_sentiment) # index position of the 'max_sentiment' value
print(melville_blob.sentences[max_sentiment_index])
## Finding and printing the most 'positive' sentence in a list of smoothed sentiment values
max_sentiment = max(austen_sentiments_smooth[199:])
print(max_sentiment) # max sentiment polarity value
print()
max_sentiment_index = list(austen_sentiments_smooth).index(max_sentiment) # index position of the 'max_sentiment' value
print(austen_blob.sentences[max_sentiment_index])
## Finding and printing the most 'negative' sentence in a list of smoothed sentiment values
min_sentiment = min(austen_sentiments_smooth[199:])
print(min_sentiment) # min sentiment polarity value
print()
min_sent_index=list(austen_sentiments_smooth).index(min_sentiment) # index position of the 'min_sentiment' value
print(austen_blob.sentences[min_sent_index])
## Creating a function to expedite the steps we put together above
# This function accepts an optional second argument for smoothing window size. The default is 200 windows.
def plot_polarity(text_path, window=200):
    """Plot the rolling-mean sentence sentiment polarity of a text file.

    text_path : path to a plain-text file.
    window    : rolling-mean window size in sentences (default 200).
    Displays a matplotlib figure as a side effect.
    """
    raw = open(text_path).read().replace('\n', ' ')
    sentence_blob = TextBlob(raw)
    polarity = [s.sentiment.polarity for s in sentence_blob.sentences]
    # Smooth the per-sentence values so the long-range trend is visible.
    smoothed = pd.Series(polarity).rolling(window).mean()
    plt.figure(figsize=(18, 8))
    plt.plot(smoothed)
    plt.show()
!find ./
plot_polarity('George_Eliot/Silas_Marner.txt')
plot_polarity('Joseph_Conrad/Heart_of_Darkness.txt')
```
### ▷ Plotting smoothed random data (for comparison)
```
## Plotting completely random data
random_vals = np.random.rand(4000)
vals_pd = pd.Series(random_vals)
vals_smooth = vals_pd.rolling(window=200).mean()
plt.figure(figsize=(18,8))
plt.plot(vals_smooth)
```
### ▷ Working with multiple files
```
!ls *
os.chdir('/sharedfolder/Sample_corpora/Inaugural_Speeches/')
sorted(os.listdir('./'))
inaugural_filenames = sorted(os.listdir('./'))
inaugural_sentiment_values = []
for filename in inaugural_filenames:
inaugural_text = open(filename).read()
sentiment_polarity_value = TextBlob(inaugural_text).sentiment.polarity
inaugural_sentiment_values.append(sentiment_polarity_value)
print(inaugural_sentiment_values)
## Creating nicely formatted labels for the sentiment values above
inaugural_labels = [item.replace('.txt','').replace('_', ' ').title() for item in inaugural_filenames]
inaugural_labels
## Plotting presidential inaugural address sentiment values over time
plt.figure(figsize = (20,8))
plt.xticks(range(len(inaugural_sentiment_values)), inaugural_labels) # two arguments: tick positions, tick display list
plt.xticks(rotation=-85)
plt.ylabel('Sentiment Polarity Value')
plt.plot(inaugural_sentiment_values)
plt.show()
```
## ▷ Assignment
For each author in our set of corpora, which is their most 'positive' novel? Their most 'negative'?
## ▷ Sentiment Histograms
```
os.chdir('/sharedfolder/Sample_corpora/')
text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\n', ' ')
blob = TextBlob(text_in)
sentiments = [sentence.sentiment.polarity for sentence in blob.sentences]
plt.figure(figsize=(20,10))
plt.hist(sentiments, bins=25)
plt.show()
text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\n', ' ')
blob = TextBlob(text_in)
sentiments = [sentence.sentiment.subjectivity for sentence in blob.sentences]
plt.figure(figsize=(20,10))
plt.hist(sentiments, bins=25)
plt.show()
```
## ▷ Cleaning sentiment values
```
text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\n', ' ')
blob = TextBlob(text_in)
sentiments = [sentence.sentiment.polarity for sentence in blob.sentences]
sentiments_cleaned = [value for value in sentiments if value!=0]
plt.figure(figsize=(20,10))
plt.hist(sentiments_cleaned, bins=25)
plt.show()
def polarity_histogram_cleaned(text_path):
    """Histogram of nonzero sentence sentiment polarity values for a text file.

    Sentences with exactly zero polarity (no sentiment signal) are dropped
    before binning, so the spike at 0 does not dominate the distribution.
    """
    raw = open(text_path).read().replace('\n', ' ')
    sentence_blob = TextBlob(raw)
    nonzero = [s.sentiment.polarity for s in sentence_blob.sentences
               if s.sentiment.polarity != 0]
    plt.figure(figsize=(20, 10))
    plt.hist(nonzero, bins=25)
    plt.show()
!find ./
polarity_histogram_cleaned('./Joseph_Conrad/The_Secret_Agent.txt')
```
## ▷ Comparing Sentiment Distributions
```
melville_blob = TextBlob(open('Herman_Melville/Moby_Dick.txt').read().replace('\n', ' '))
austen_blob = TextBlob(open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\n', ' '))
melville_sentiments = [sentence.sentiment.polarity for sentence in melville_blob.sentences]
melville_sentiments_cleaned = [value for value in melville_sentiments if value!=0.0]
austen_sentiments = [sentence.sentiment.polarity for sentence in austen_blob.sentences]
austen_sentiments_cleaned = [value for value in austen_sentiments if value!=0.0]
plt.figure(figsize=(15,8))
plt.hist(melville_sentiments_cleaned, bins=25, alpha=0.5, label='Moby Dick')
plt.hist(austen_sentiments_cleaned, bins=25, alpha=0.5, label='Pride and Prejudice')
plt.legend(loc='upper right')
plt.show()
print(np.mean(melville_sentiments_cleaned))
print(np.mean(austen_sentiments_cleaned))
```
## ▷ Statistical Tests
```
## t-test of independent values
# (used to determine whether two *normally distributed* sets of values are significantly different)
from scipy import stats
stats.ttest_ind(melville_sentiments_cleaned, austen_sentiments_cleaned)
## Mann-Whitney U test
# (used to test whether two sets of *non-normally distributed* values are significantly different)
stats.mannwhitneyu(melville_sentiments, austen_sentiments)
```
## ▷ Assignment
Is George Eliot significantly more subjective than Jane Austen?
Is Herman Melville significantly more 'positive' than Joseph Conrad?
## ▷ Assignment
Write a function that takes two texts' paths as arguments and
(a) plots a histogram comparing their sentences' sentiment distributions
(b) tests whether their sentiment values are significantly different
| github_jupyter |
### Introduction
This is a `View` Notebook to show an `IntText` widget either in an interactive Notebook or in a `Voila` Dashboard mode that will then print the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number) answer for that number. It will also show how long it takes each handler to calculate the number, which should demonstrate what kind of overhead is involved with `refactored code`, `PythonModel`, and `KernelModel`.
```
import ipywidgets as widgets

grid = widgets.GridspecLayout(4, 3)

# Top row: the single user-controlled input.
input_label = widgets.Label("User Input")
user_input = widgets.IntText(value=1, description='Fibonnaci n:')
grid[0, 0] = input_label
grid[0, 1:] = user_input


def _result_row(caption):
    """Build one output row: a caption label plus read-only Result/Debug fields."""
    return (widgets.Label(caption),
            widgets.Text(disabled=True, description='Result:'),
            widgets.Text(disabled=True, description='Debug:'))


# One row per implementation strategy being compared.
label1, output1, debug1 = _result_row('Refactored code')
label2, output2, debug2 = _result_row('PythonModel')
label3, output3, debug3 = _result_row('KernelModel')

# Place the three rows below the input row.
for _r, _row_widgets in enumerate([(label1, output1, debug1),
                                   (label2, output2, debug2),
                                   (label3, output3, debug3)], start=1):
    for _c, _w in enumerate(_row_widgets):
        grid[_r, _c] = _w

grid
import time
### Refactored code handler
def fibonacci_generator():
    """Yield the Fibonacci numbers 0, 1, 1, 2, 3, 5, ... indefinitely."""
    a, b = 0, 1
    while True:
        yield a
        # Advance one step with a simultaneous tuple assignment.
        a, b = b, a + b
def handler1(ev):
    """Recompute Fibonacci(n) with the local generator and report the timing."""
    start = time.time()
    fib = fibonacci_generator()
    steps = user_input.value + 1
    # next() is called n+1 times, so `answer` ends up holding F(n).
    for _ in range(steps):
        answer = next(fib)
    output1.value = str(answer)
    debug1.value = 'took %.4f seconds' % (time.time() - start)


user_input.observe(handler1, names='value')
### Create PythonModel and KernelModel objects
import notebook_restified
# Two execution backends for the same parameterized notebook
# (presumably in-process vs. separate-kernel execution — confirm against
# the notebook_restified documentation).
pm = notebook_restified.PythonModel('model.ipynb')
km = notebook_restified.KernelModel('model.ipynb')
### PythonModel handler
def handler2(ev):
    """Delegate the Fibonacci computation to the PythonModel and report timing."""
    start = time.time()
    result = pm.execute({'n': user_input.value})
    output2.value = str(result)
    debug2.value = 'took %.4f seconds' % (time.time() - start)


user_input.observe(handler2, names='value')
### KernelModel handler
def handler3(ev):
    """Delegate the Fibonacci computation to the KernelModel and report timing."""
    start = time.time()
    result = km.execute({'n': user_input.value})
    output3.value = str(result)
    debug3.value = 'took %.4f seconds' % (time.time() - start)


user_input.observe(handler3, names='value')
```
| github_jupyter |
## 1. Meet Dr. Ignaz Semmelweis
<p><img style="float: left;margin:5px 20px 5px 1px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg"></p>
<!--
<img style="float: left;margin:5px 20px 5px 1px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg">
-->
<p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p>
<p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p>
```
# importing modules
import pandas as pd
# Read datasets/yearly_deaths_by_clinic.csv into yearly
# (one row per clinic per year; later cells use its 'year', 'births',
# 'deaths', and 'clinic' columns)
yearly = pd.read_csv('datasets/yearly_deaths_by_clinic.csv')
# Print out yearly
print(yearly)
```
## 2. The alarming number of deaths
<p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p>
<p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. Let's zoom in on the proportion of deaths at Clinic 1.</p>
```
# Proportion of deaths per number of births, as a new column.
yearly["proportion_deaths"] = yearly["deaths"] / yearly["births"]

# Split the data by clinic.
yearly1 = yearly[yearly["clinic"] == "clinic 1"]
yearly2 = yearly[yearly["clinic"] == "clinic 2"]

# Inspect clinic 1.
print(yearly1)
```
## 3. Death at the clinics
<p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern...</p>
```
# This makes plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# Plot yearly proportion of deaths at the two clinics
ax = yearly1.plot(x="year",
y="proportion_deaths",
label="Clinic 1")
yearly2.plot(x="year", y="proportion_deaths",
label="Clinic 2", ax=ax)
ax.set_ylabel("Proportion deaths")
```
## 4. The handwashing begins
<p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p>
<p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request, nobody in Vienna knew about bacteria at this point in time. </p>
<p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p>
```
# Monthly data for Clinic 1, with the date column parsed to datetime.
monthly = pd.read_csv("datasets/monthly_deaths.csv", parse_dates=["date"])

# Proportion of deaths per number of births.
monthly["proportion_deaths"] = monthly["deaths"] / monthly["births"]

# Peek at the first rows.
monthly.head()
```
## 5. The effect of handwashing
<p>With the data loaded we can now look at the proportion of deaths over time. In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p>
```
# Plot monthly proportion of deaths
# (the sharp drop in mid-1847 marks the start of mandatory handwashing)
ax = monthly.plot(x="date", y="proportion_deaths")
ax.set_ylabel("Proportion deaths")
```
## 6. The effect of handwashing highlighted
<p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p>
<p>The effect of handwashing is made even more clear if we highlight this in the graph.</p>
```
# Handwashing became mandatory on this date.
import pandas as pd
handwashing_start = pd.to_datetime('1847-06-01')

# Partition the monthly data at the start of mandatory handwashing.
before_washing = monthly[monthly["date"] < handwashing_start]
after_washing = monthly[monthly["date"] >= handwashing_start]

# Plot both periods on one axis to highlight the change.
ax = before_washing.plot(x="date", y="proportion_deaths", label="Before handwashing")
after_washing.plot(x="date", y="proportion_deaths", label="After handwashing", ax=ax)
ax.set_ylabel("Proportion deaths")
```
## 7. More handwashing, fewer deaths?
<p>Again, the graph shows that handwashing had a huge effect. How much did it reduce the monthly proportion of deaths on average?</p>
```
# Difference in mean monthly proportion of deaths due to handwashing.
before_proportion = before_washing["proportion_deaths"]
after_proportion = after_washing["proportion_deaths"]
_mean_before = before_proportion.mean()
_mean_after = after_proportion.mean()
mean_diff = _mean_after - _mean_before
mean_diff
```
## 8. A Bootstrap analysis of Semmelweis handwashing data
<p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). </p>
<p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p>
```
# Bootstrap the mean difference: resample each period with replacement
# and record the difference of the resampled means, 3000 times.
def _boot_diff():
    """One bootstrap replicate of (after mean - before mean)."""
    resampled_before = before_proportion.sample(frac=1, replace=True)
    resampled_after = after_proportion.sample(frac=1, replace=True)
    return resampled_after.mean() - resampled_before.mean()


boot_mean_diff = [_boot_diff() for _ in range(3000)]

# 95% confidence interval = 2.5th and 97.5th percentiles of the
# bootstrap distribution.
confidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975])
confidence_interval
```
## 9. The fate of Dr. Semmelweis
<p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p>
<p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some "substance" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p>
<p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs nor confidence intervals. If he would have had access to the analysis we've just put together he might have been more successful in getting the Viennese doctors to wash their hands.</p>
```
# The data Semmelweis collected points to that:
# (boolean conclusion of the analysis above)
doctors_should_wash_their_hands = True
```
| github_jupyter |
<center>
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# Pie Charts, Box Plots, Scatter Plots, and Bubble Plots
Estimated time needed: **30** minutes
## Objectives
After completing this lab you will be able to:
- Explore Matplotlib library further
- Create pie charts, box plots, scatter plots and bubble charts
## Table of Contents
<div class="alert alert-block alert-info" style="margin-top: 20px">
1. [Exploring Datasets with _pandas_](#0)<br>
2. [Downloading and Prepping Data](#2)<br>
3. [Visualizing Data using Matplotlib](#4) <br>
4. [Pie Charts](#6) <br>
5. [Box Plots](#8) <br>
6. [Scatter Plots](#10) <br>
7. [Bubble Plots](#12) <br>
</div>
<hr>
# Exploring Datasets with _pandas_ and Matplotlib<a id="0"></a>
Toolkits: The course heavily relies on [_pandas_](http://pandas.pydata.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) and [**Numpy**](http://www.numpy.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) for data wrangling, analysis, and visualization. The primary plotting library we will explore in the course is [Matplotlib](http://matplotlib.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
Dataset: Immigration to Canada from 1980 to 2013 - [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) from United Nation's website.
The dataset contains annual data on the flows of international migrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. In this lab, we will focus on the Canadian Immigration data.
# Downloading and Prepping Data <a id="2"></a>
Import primary modules.
```
import numpy as np  # useful for many scientific computing in Python
import pandas as pd  # primary data structure library
```
Let's download and import our primary Canadian Immigration dataset using _pandas_ `read_excel()` method. Normally, before we can do that, we would need to download a module which _pandas_ requires to read in excel files. This module is **xlrd**. For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **xlrd** module:
```
# Notebook shell escape: install the xlrd module pandas needs to read
# Excel files (pre-installed in this environment per the text above).
!conda install -c anaconda xlrd --yes
```
Download the dataset and read it into a _pandas_ dataframe.
```
# Canadian immigration data: sheet 'Canada by Citizenship', skipping the
# 20 header rows and the 2 footer rows of the workbook.
df_can = pd.read_excel(
    'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx',
    sheet_name='Canada by Citizenship',
    skiprows=range(20),
    skipfooter=2,
)

print('Data downloaded and read into a dataframe!')
```
Let's take a look at the first five items in our dataset.
```
# Display the first five rows of the immigration dataframe
df_can.head()
```
Let's find out how many entries there are in our dataset.
```
# print the dimensions of the dataframe as (rows, columns)
print(df_can.shape)
```
Clean up data. We will make some modifications to the original dataset to make it easier to create our visualizations. Refer to _Introduction to Matplotlib and Line Plots_ and _Area Plots, Histograms, and Bar Plots_ for a detailed description of this preprocessing.
```
# clean up the dataset to remove unnecessary columns (eg. REG)
df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)

# let's rename the columns so that they make sense
df_can.rename(columns={'OdName': 'Country', 'AreaName': 'Continent', 'RegName': 'Region'}, inplace=True)

# for sake of consistency, let's also make all column labels of type string
df_can.columns = list(map(str, df_can.columns))

# set the country name as index - useful for quickly looking up countries using .loc method
df_can.set_index('Country', inplace=True)

# years that we will be using in this lesson - useful for plotting later on
# (defined before the 'Total' column so the sum below can be restricted to them)
years = list(map(str, range(1980, 2014)))

# add total column: sum only the per-year columns. Summing the whole row
# (the original df_can.sum(axis=1)) would also try to add the non-numeric
# 'Continent'/'Region' columns, which newer pandas versions reject.
df_can['Total'] = df_can[years].sum(axis=1)

print('data dimensions:', df_can.shape)
```
# Visualizing Data using Matplotlib<a id="4"></a>
Import `Matplotlib`.
```
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('ggplot') # optional: for ggplot-like style
# check for latest version of Matplotlib
print('Matplotlib version: ', mpl.__version__) # >= 2.0.0
```
# Pie Charts <a id="6"></a>
A `pie chart` is a circular graphic that displays numeric proportions by dividing a circle (or pie) into proportional slices. You are most likely already familiar with pie charts as it is widely used in business and media. We can create pie charts in Matplotlib by passing in the `kind=pie` keyword.
Let's use a pie chart to explore the proportion (percentage) of new immigrants grouped by continents for the entire time period from 1980 to 2013.
Step 1: Gather data.
We will use _pandas_ `groupby` method to summarize the immigration data by `Continent`. The general process of `groupby` involves the following steps:
1. **Split:** Splitting the data into groups based on some criteria.
2. **Apply:** Applying a function to each group independently:
.sum()
.count()
.mean()
.std()
.aggregate()
.apply()
.etc..
3. **Combine:** Combining the results into a data structure.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig4SplitApplyCombine.png" height=400 align="center">
```
# group countries by continents and apply sum() function.
# numeric_only=True restricts the sum to the year/Total columns: without it,
# modern pandas string-concatenates the 'Region' column instead of dropping
# it. The groupby 'axis' keyword is deprecated (pandas >= 2.1), so it is
# omitted; axis=0 (group by rows) is the default.
df_continents = df_can.groupby('Continent').sum(numeric_only=True)

# note: the output of the groupby method is a `groupby' object.
# we can not use it further until we apply a function (eg .sum())
print(type(df_can.groupby('Continent')))

df_continents.head()
```
Step 2: Plot the data. We will pass in `kind = 'pie'` keyword, along with the following additional parameters:
- `autopct` - is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be `fmt%pct`.
- `startangle` - rotates the start of the pie chart by angle degrees counterclockwise from the x-axis.
- `shadow` - Draws a shadow beneath the pie (to give a 3D feel).
```
# Pie chart of total immigration by continent.
# autopct labels each wedge with its percentage; startangle=90 puts the
# first wedge (Africa) at the top; shadow gives a 3D feel.
df_continents['Total'].plot(
    kind='pie',
    figsize=(5, 6),
    autopct='%1.1f%%',
    startangle=90,
    shadow=True,
)

plt.title('Immigration to Canada by Continent [1980 - 2013]')
plt.axis('equal')  # draw the pie as a circle rather than an ellipse

plt.show()
```
The above visual is not very clear, the numbers and text overlap in some instances. Let's make a few modifications to improve the visuals:
- Remove the text labels on the pie chart by passing in `legend` and add it as a separate legend using `plt.legend()`.
- Push out the percentages to sit just outside the pie chart by passing in `pctdistance` parameter.
- Pass in a custom set of colors for continents by passing in `colors` parameter.
- **Explode** the pie chart to emphasize the lowest three continents (Africa, North America, and Latin America and the Caribbean) by passing in the `explode` parameter.
```
colors_list = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'lightgreen', 'pink']
explode_list = [0.1, 0, 0, 0, 0.1, 0.1]  # ratio for each continent with which to offset each wedge.

# Cleaner pie: wedge labels off (legend used instead), percentages pushed
# just outside the pie, custom colors, and the three smallest wedges exploded.
df_continents['Total'].plot(
    kind='pie',
    figsize=(15, 6),
    autopct='%1.1f%%',
    startangle=90,
    shadow=True,
    labels=None,
    pctdistance=1.12,
    colors=colors_list,
    explode=explode_list,
)

# scale the title up by 12% to match pctdistance
plt.title('Immigration to Canada by Continent [1980 - 2013]', y=1.12)
plt.axis('equal')

# legend carries the continent names removed from the wedges
plt.legend(labels=df_continents.index, loc='upper left')

plt.show()
```
**Question:** Using a pie chart, explore the proportion (percentage) of new immigrants grouped by continents in the year 2013.
**Note**: You might need to play with the explode values in order to fix any overlapping slice values.
```
### type your answer here
explode_list = [0.0, 0, 0, 0.1, 0.1, 0.2]  # ratio for each continent with which to offset each wedge.

# Same styled pie as above, but for the single year 2013.
df_continents['2013'].plot(
    kind='pie',
    figsize=(15, 6),
    autopct='%1.1f%%',
    startangle=90,
    shadow=True,
    labels=None,
    pctdistance=1.12,
    explode=explode_list,
)

# scale the title up by 12% to match pctdistance
plt.title('Immigration to Canada by Continent in 2013', y=1.12)
plt.axis('equal')

# add legend
plt.legend(labels=df_continents.index, loc='upper left')

# show plot
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
explode_list = [0.0, 0, 0, 0.1, 0.1, 0.2] # ratio for each continent with which to offset each wedge.
df_continents['2013'].plot(kind='pie',
figsize=(15, 6),
autopct='%1.1f%%',
startangle=90,
shadow=True,
labels=None, # turn off labels on pie chart
pctdistance=1.12, # the ratio between the pie center and start of text label
explode=explode_list # 'explode' lowest 3 continents
)
# scale the title up by 12% to match pctdistance
plt.title('Immigration to Canada by Continent in 2013', y=1.12)
plt.axis('equal')
# add legend
plt.legend(labels=df_continents.index, loc='upper left')
# show plot
plt.show()
```
</details>
# Box Plots <a id="8"></a>
A `box plot` is a way of statistically representing the _distribution_ of the data through five main dimensions:
- **Minimum:** Smallest number in the dataset excluding the outliers.
- **First quartile:** Middle number between the `minimum` and the `median`.
- **Second quartile (Median):** Middle number of the (sorted) dataset.
- **Third quartile:** Middle number between `median` and `maximum`.
- **Maximum:** Highest number in the dataset excluding the outliers.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/boxplot_complete.png" width=440, align="center">
To make a `box plot`, we can use `kind=box` in `plot` method invoked on a _pandas_ series or dataframe.
Let's plot the box plot for the Japanese immigrants between 1980 - 2013.
Step 1: Get the dataset. Even though we are extracting the data for just one country, we will obtain it as a dataframe. This will help us with calling the `dataframe.describe()` method to view the percentiles.
```
# to get a dataframe, place extra square brackets around 'Japan'.
# (a DataFrame, not a Series, is needed so describe() reports percentiles;
# transpose puts the years on the rows)
df_japan = df_can.loc[['Japan'], years].transpose()
df_japan.head()
```
Step 2: Plot by passing in `kind='box'`.
```
# Box plot of the yearly immigrant counts from Japan
df_japan.plot(kind='box', figsize=(8, 6))
plt.title('Box plot of Japanese Immigrants from 1980 - 2013')
plt.ylabel('Number of Immigrants')
plt.show()
```
We can immediately make a few key observations from the plot above:
1. The minimum number of immigrants is around 200 (min), maximum number is around 1300 (max), and median number of immigrants is around 900 (median).
2. 25% of the years for period 1980 - 2013 had an annual immigrant count of ~500 or fewer (First quartile).
3. 75% of the years for period 1980 - 2013 had an annual immigrant count of ~1100 or fewer (Third quartile).
We can view the actual numbers by calling the `describe()` method on the dataframe.
```
# Summary statistics (count/mean/std/quartiles) behind the box plot above
df_japan.describe()
```
One of the key benefits of box plots is comparing the distribution of multiple datasets. In one of the previous labs, we observed that China and India had very similar immigration trends. Let's analyze these two countries further using box plots.
**Question:** Compare the distribution of the number of new immigrants from India and China for the period 1980 - 2013.
Step 1: Get the dataset for China and India and call the dataframe **df_CI**.
```
### type your answer here
# China and India year-columns, transposed so the years become rows
df_CI= df_can.loc[['China', 'India'], years].transpose()
df_CI.head()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
df_CI= df_can.loc[['China', 'India'], years].transpose()
df_CI.head()
```
</details>
Let's view the percentages associated with both countries using the `describe()` method.
```
### type your answer here
# Percentile summary for both countries
df_CI.describe()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
df_CI.describe()
```
</details>
Step 2: Plot data.
```
### type your answer here
# Side-by-side box plots of the two distributions
df_CI.plot(kind='box', figsize=(10, 7))
plt.title('Box plot of China and India Immigrants from 1980 - 2013')
plt.ylabel('Number of Immigrants')
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
df_CI.plot(kind='box', figsize=(10, 7))
plt.title('Box plots of Immigrants from China and India (1980 - 2013)')
plt.ylabel('Number of Immigrants')
plt.show()
```
</details>
We can observe that, while both countries have around the same median immigrant population (~20,000), China's immigrant population range is more spread out than India's. The maximum population from India for any year (36,210) is around 15% lower than the maximum population from China (42,584).
If you prefer to create horizontal box plots, you can pass the `vert` parameter in the **plot** function and assign it to _False_. You can also specify a different color in case you are not a big fan of the default red color.
```
# horizontal box plots (vert=False flips the orientation; color overrides
# the default red)
df_CI.plot(kind='box', figsize=(10, 7), color='blue', vert=False)
plt.title('Box plots of Immigrants from China and India (1980 - 2013)')
plt.xlabel('Number of Immigrants')
plt.show()
```
**Subplots**
Often times we might want to plot multiple plots within the same figure. For example, we might want to perform a side by side comparison of the box plot with the line plot of China and India's immigration.
To visualize multiple plots together, we can create a **`figure`** (overall canvas) and divide it into **`subplots`**, each containing a plot. With **subplots**, we usually work with the **artist layer** instead of the **scripting layer**.
Typical syntax is : <br>
```python
fig = plt.figure() # create figure
ax = fig.add_subplot(nrows, ncols, plot_number) # create subplots
```
Where
- `nrows` and `ncols` are used to notionally split the figure into (`nrows` * `ncols`) sub-axes,
- `plot_number` is used to identify the particular subplot that this function is to create within the notional grid. `plot_number` starts at 1, increments across rows first and has a maximum of `nrows` * `ncols` as shown below.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig5Subplots_V2.png" width=500 align="center">
We can then specify which subplot to place each plot by passing in the `ax` paramemter in `plot()` method as follows:
```
# Side-by-side comparison: box plot and line plot of the same data,
# placed on two subplots of one figure.
fig = plt.figure() # create figure
ax0 = fig.add_subplot(1, 2, 1) # add subplot 1 (1 row, 2 columns, first plot)
ax1 = fig.add_subplot(1, 2, 2) # add subplot 2 (1 row, 2 columns, second plot). See tip below**
# Subplot 1: Box plot
df_CI.plot(kind='box', color='blue', vert=False, figsize=(20, 6), ax=ax0) # add to subplot 1
ax0.set_title('Box Plots of Immigrants from China and India (1980 - 2013)')
ax0.set_xlabel('Number of Immigrants')
ax0.set_ylabel('Countries')
# Subplot 2: Line plot
df_CI.plot(kind='line', figsize=(20, 6), ax=ax1) # add to subplot 2
ax1.set_title ('Line Plots of Immigrants from China and India (1980 - 2013)')
ax1.set_ylabel('Number of Immigrants')
ax1.set_xlabel('Years')
plt.show()
```
**Tip regarding subplot convention**

In the case when `nrows`, `ncols`, and `plot_number` are all less than 10, a convenience exists such that a 3-digit number can be given instead, where the hundreds represent `nrows`, the tens represent `ncols` and the units represent `plot_number`. For instance,
```python
subplot(211) == subplot(2, 1, 1)
```
produces a subaxes in a figure which represents the top plot (i.e. the first) in a 2 rows by 1 column notional grid (no grid actually exists, but conceptually this is how the returned subplot has been positioned).
Let's try something a little more advanced.
Previously we identified the top 15 countries based on total immigration from 1980 - 2013.
**Question:** Create a box plot to visualize the distribution of the top 15 countries (based on total immigration) grouped by the _decades_ `1980s`, `1990s`, and `2000s`.
Step 1: Get the dataset. Get the top 15 countries based on Total immigrant population. Name the dataframe **df_top15**.
```
### type your answer here
# Top 15 countries by total immigration, in descending order
df_top15=df_can.sort_values(by='Total', ascending=False, axis=0).head(15)
df_top15
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
df_top15 = df_can.sort_values(['Total'], ascending=False, axis=0).head(15)
df_top15
```
</details>
Step 2: Create a new dataframe which contains the aggregate for each decade. One way to do that:
1. Create a list of all years in decades 80's, 90's, and 00's.
2. Slice the original dataframe df_can to create a series for each decade and sum across all years for each country.
3. Merge the three series into a new data frame. Call your dataframe **new_df**.
```
### type your answer here
# String labels for each decade's year-columns in df_can.
years_80s = [str(year) for year in range(1980, 1990)]
years_90s = [str(year) for year in range(1990, 2000)]
years_00s = [str(year) for year in range(2000, 2010)]

# Per-country immigration totals for each decade.
df_80s = df_top15[years_80s].sum(axis=1)
df_90s = df_top15[years_90s].sum(axis=1)
df_00s = df_top15[years_00s].sum(axis=1)

# Assemble the three decade totals into a single dataframe.
new_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s': df_00s})

# display dataframe
new_df.head()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# create a list of all years in decades 80's, 90's, and 00's
years_80s = list(map(str, range(1980, 1990)))
years_90s = list(map(str, range(1990, 2000)))
years_00s = list(map(str, range(2000, 2010)))
# slice the original dataframe df_can to create a series for each decade
df_80s = df_top15.loc[:, years_80s].sum(axis=1)
df_90s = df_top15.loc[:, years_90s].sum(axis=1)
df_00s = df_top15.loc[:, years_00s].sum(axis=1)
# merge the three series into a new data frame
new_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s':df_00s})
# display dataframe
new_df.head()
```
</details>
Let's learn more about the statistics associated with the dataframe using the `describe()` method.
```
### type your answer here
# Quartiles per decade — used below to compute the outlier threshold
new_df.describe()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
new_df.describe()
```
</details>
Step 3: Plot the box plots.
```
### type your answer here
# Horizontal box plots of the per-country totals, one box per decade
new_df.plot(kind='box', figsize=(10, 6), color='blue', vert=False)
plt.title('Immigration from top 15 countries for decades 80s, 90s and 2000s')
plt.xlabel('Number of Immigrants')
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
new_df.plot(kind='box', figsize=(10, 6))
plt.title('Immigration from top 15 countries for decades 80s, 90s and 2000s')
plt.show()
```
</details>
Note how the box plot differs from the summary table created. The box plot scans the data and identifies the outliers. In order to be an outlier, the data value must be:<br>
- larger than Q3 by at least 1.5 times the interquartile range (IQR), or,
- smaller than Q1 by at least 1.5 times the IQR.
Let's look at decade 2000s as an example: <br>
- Q1 (25%) = 36,101.5 <br>
- Q3 (75%) = 105,505.5 <br>
- IQR = Q3 - Q1 = 69,404 <br>
Using the definition of outlier, any value that is greater than Q3 by 1.5 times IQR will be flagged as outlier.
Outlier > 105,505.5 + (1.5 * 69,404) <br>
Outlier > 209,611.5
```
# let's check how many entries fall above the outlier threshold
new_df=new_df.reset_index()
new_df[new_df['2000s']> 209611.5]
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
new_df=new_df.reset_index()
new_df[new_df['2000s']> 209611.5]
```
</details>
<!-- The correct answer is:
new_df[new_df['2000s']> 209611.5]
-->
China and India are both considered as outliers since their population for the decade exceeds 209,611.5.
The box plot is an advanced visualizaiton tool, and there are many options and customizations that exceed the scope of this lab. Please refer to [Matplotlib documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) on box plots for more information.
# Scatter Plots <a id="10"></a>
A `scatter plot` (2D) is a useful method of comparing variables against each other. `Scatter` plots look similar to `line plots` in that they both map independent and dependent variables on a 2D graph. While the datapoints are connected together by a line in a line plot, they are not connected in a scatter plot. The data in a scatter plot is considered to express a trend. With further analysis using tools like regression, we can mathematically calculate this relationship and use it to predict trends outside the dataset.
Let's start by exploring the following:
Using a `scatter plot`, let's visualize the trend of total immigrantion to Canada (all countries combined) for the years 1980 - 2013.
Step 1: Get the dataset. Since we are expecting to use the relationship between `years` and `total population`, we will convert `years` to `int` type.
```
# we can use the sum() method to get the total population per year
df_tot = pd.DataFrame(df_can[years].sum(axis=0))
# change the years to type int (useful for regression later on)
df_tot.index = map(int, df_tot.index)
# reset the index to put in back in as a column in the df_tot dataframe
df_tot.reset_index(inplace = True)
# rename columns
df_tot.columns = ['year', 'total']
# view the final dataframe
df_tot.head()
```
Step 2: Plot the data. In `Matplotlib`, we can create a `scatter` plot set by passing in `kind='scatter'` as plot argument. We will also need to pass in `x` and `y` keywords to specify the columns that go on the x- and the y-axis.
```
df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')
plt.title('Total Immigration to Canada from 1980 - 2013')
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
plt.show()
```
Notice how the scatter plot does not connect the datapoints together. We can clearly observe an upward trend in the data: as the years go by, the total number of immigrants increases. We can mathematically analyze this upward trend using a regression line (line of best fit).
So let's try to plot a linear line of best fit, and use it to predict the number of immigrants in 2015.
Step 1: Get the equation of line of best fit. We will use **Numpy**'s `polyfit()` method by passing in the following:
- `x`: x-coordinates of the data.
- `y`: y-coordinates of the data.
- `deg`: Degree of fitting polynomial. 1 = linear, 2 = quadratic, and so on.
```
x = df_tot['year'] # year on x-axis
y = df_tot['total'] # total on y-axis
fit = np.polyfit(x, y, deg=1)
fit
```
The output is an array with the polynomial coefficients, highest powers first. Since we are plotting a linear regression `y = a*x + b`, our output has 2 elements `[5.56709228e+03, -1.09261952e+07]` with the slope in position 0 and intercept in position 1.
Step 2: Plot the regression line on the `scatter plot`.
```
df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')
plt.title('Total Immigration to Canada from 1980 - 2013')
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
# plot line of best fit
plt.plot(x, fit[0] * x + fit[1], color='red') # recall that x is the Years
plt.annotate('y={0:.0f} x + {1:.0f}'.format(fit[0], fit[1]), xy=(2000, 150000))
plt.show()
# print out the line of best fit
'No. Immigrants = {0:.0f} * Year + {1:.0f}'.format(fit[0], fit[1])
```
Using the equation of line of best fit, we can estimate the number of immigrants in 2015:
```python
No. Immigrants = 5567 * Year - 10926195
No. Immigrants = 5567 * 2015 - 10926195
No. Immigrants = 291,310
```
When compared to the actuals from Citizenship and Immigration Canada's (CIC) [2016 Annual Report](http://www.cic.gc.ca/english/resources/publications/annual-report-2016/index.asp?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ), we see that Canada accepted 271,845 immigrants in 2015. Our estimated value of 291,310 is within 7% of the actual number, which is pretty good considering our original data came from United Nations (and might differ slightly from CIC data).
As a side note, we can observe that immigration took a dip around 1993 - 1997. Further analysis into the topic revealed that in 1993 Canada introduced Bill C-86, which brought revisions to the refugee determination system, mostly restrictive. Further amendments to the Immigration Regulations cancelled the sponsorship required for "assisted relatives" and reduced the points awarded to them, making it more difficult for family members (other than nuclear family) to immigrate to Canada. These restrictive measures had a direct impact on the immigration numbers for the next several years.
**Question**: Create a scatter plot of the total immigration from Denmark, Norway, and Sweden to Canada from 1980 to 2013?
Step 1: Get the data:
1. Create a dataframe that consists of the numbers associated with Denmark, Norway, and Sweden only. Name it **df_countries**.
2. Sum the immigration numbers across all three countries for each year and turn the result into a dataframe. Name this new dataframe **df_total**.
3. Reset the index in place.
4. Rename the columns to **year** and **total**.
5. Display the resulting dataframe.
```
### type your answer here
# create df_countries dataframe
df_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose()
# create df_total by summing across three countries for each year
df_total = pd.DataFrame(df_countries.sum(axis=1))
# reset index in place
df_total.reset_index(inplace=True)
# rename columns
df_total.columns = ['year', 'total']
# change column year from string to int to create scatter plot
df_total['year'] = df_total['year'].astype(int)
# show resulting dataframe
df_total.head()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# create df_countries dataframe
df_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose()
# create df_total by summing across three countries for each year
df_total = pd.DataFrame(df_countries.sum(axis=1))
# reset index in place
df_total.reset_index(inplace=True)
# rename columns
df_total.columns = ['year', 'total']
# change column year from string to int to create scatter plot
df_total['year'] = df_total['year'].astype(int)
# show resulting dataframe
df_total.head()
```
</details>
Step 2: Generate the scatter plot by plotting the total versus year in **df_total**.
```
### type your answer here
# generate scatter plot
df_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')
# add title and label to axes
plt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013')
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
# show plot
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# generate scatter plot
df_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')
# add title and label to axes
plt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013')
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
# show plot
plt.show()
```
</details>
# Bubble Plots <a id="12"></a>
A `bubble plot` is a variation of the `scatter plot` that displays three dimensions of data (x, y, z). The datapoints are replaced with bubbles, and the size of the bubble is determined by the third variable 'z', also known as the weight. In `matplotlib`, we can pass an array or scalar to the keyword `s` of `plot()`, which contains the weight of each point.
**Let's start by analyzing the effect of Argentina's great depression**.
Argentina suffered a great depression from 1998 - 2002, which caused widespread unemployment, riots, the fall of the government, and a default on the country's foreign debt. In terms of income, over 50% of Argentines were poor, and seven out of ten Argentine children were poor at the depth of the crisis in 2002.
Let's analyze the effect of this crisis, and compare Argentina's immigration to that of its neighbour Brazil. Let's do that using a `bubble plot` of immigration from Brazil and Argentina for the years 1980 - 2013. We will set the weights for the bubble as the _normalized_ value of the population for each year.
Step 1: Get the data for Brazil and Argentina. Like in the previous example, we will convert the `Years` to type int and bring it in the dataframe.
```
df_can_t = df_can[years].transpose() # transposed dataframe
# cast the Years (the index) to type int
df_can_t.index = map(int, df_can_t.index)
# let's label the index. This will automatically be the column name when we reset the index
df_can_t.index.name = 'Year'
# reset index to bring the Year in as a column
df_can_t.reset_index(inplace=True)
# view the changes
df_can_t.head()
```
Step 2: Create the normalized weights.
There are several methods of normalizations in statistics, each with its own use. In this case, we will use [feature scaling](https://en.wikipedia.org/wiki/Feature_scaling?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) to bring all values into the range [0,1]. The general formula is:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig3FeatureScaling.png" align="center">
where _`X`_ is an original value, _`X'`_ is the normalized value. The formula sets the max value in the dataset to 1, and sets the min value to 0. The rest of the datapoints are scaled to a value between 0-1 accordingly.
```
# normalize Brazil data
norm_brazil = (df_can_t['Brazil'] - df_can_t['Brazil'].min()) / (df_can_t['Brazil'].max() - df_can_t['Brazil'].min())
# normalize Argentina data
norm_argentina = (df_can_t['Argentina'] - df_can_t['Argentina'].min()) / (df_can_t['Argentina'].max() - df_can_t['Argentina'].min())
```
Step 3: Plot the data.
- To plot two different scatter plots in one plot, we can include the axes one plot into the other by passing it via the `ax` parameter.
- We will also pass in the weights using the `s` parameter. Given that the normalized weights are between 0-1, they won't be visible on the plot. Therefore we will:
- multiply weights by 2000 to scale it up on the graph, and,
- add 10 to compensate for the min value (which has a 0 weight and therefore scale with x2000).
```
# Brazil
ax0 = df_can_t.plot(kind='scatter',
x='Year',
y='Brazil',
figsize=(14, 8),
alpha=0.5, # transparency
color='green',
s=norm_brazil * 2000 + 10, # pass in weights
xlim=(1975, 2015)
)
# Argentina
ax1 = df_can_t.plot(kind='scatter',
x='Year',
y='Argentina',
alpha=0.5,
color="blue",
s=norm_argentina * 2000 + 10,
ax = ax0
)
ax0.set_ylabel('Number of Immigrants')
ax0.set_title('Immigration from Brazil and Argentina from 1980 - 2013')
ax0.legend(['Brazil', 'Argentina'], loc='upper left', fontsize='x-large')
```
The size of the bubble corresponds to the magnitude of immigrating population for that year, compared to the 1980 - 2013 data. The larger the bubble, the more immigrants in that year.
From the plot above, we can see a corresponding increase in immigration from Argentina during the 1998 - 2002 great depression. We can also observe a similar spike around 1985 to 1993. In fact, Argentina had suffered a great depression from 1974 - 1990, just before the onset of 1998 - 2002 great depression.
On a similar note, Brazil suffered the _Samba Effect_ where the Brazilian real (currency) dropped nearly 35% in 1999. There was a fear of a South American financial crisis as many South American countries were heavily dependent on industrial exports from Brazil. The Brazilian government subsequently adopted an austerity program, and the economy slowly recovered over the years, culminating in a surge in 2010. The immigration data reflect these events.
**Question**: Previously in this lab, we created box plots to compare immigration from China and India to Canada. Create bubble plots of immigration from China and India to visualize any differences with time from 1980 to 2013. You can use **df_can_t** that we defined and used in the previous example.
Step 1: Normalize the data pertaining to China and India.
```
### type your answer here
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# normalize China data
norm_china = (df_can_t['China'] - df_can_t['China'].min()) / (df_can_t['China'].max() - df_can_t['China'].min())
# normalize India data
norm_india = (df_can_t['India'] - df_can_t['India'].min()) / (df_can_t['India'].max() - df_can_t['India'].min())
```
</details>
Step 2: Generate the bubble plots.
```
### type your answer here
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# China
ax0 = df_can_t.plot(kind='scatter',
x='Year',
y='China',
figsize=(14, 8),
alpha=0.5, # transparency
color='green',
s=norm_china * 2000 + 10, # pass in weights
xlim=(1975, 2015)
)
# India
ax1 = df_can_t.plot(kind='scatter',
x='Year',
y='India',
alpha=0.5,
color="blue",
s=norm_india * 2000 + 10,
ax = ax0
)
ax0.set_ylabel('Number of Immigrants')
ax0.set_title('Immigration from China and India from 1980 - 2013')
ax0.legend(['China', 'India'], loc='upper left', fontsize='x-large')
```
</details>
### Thank you for completing this lab!
## Author
<a href="https://www.linkedin.com/in/aklson/" target="_blank">Alex Aklson</a>
### Other Contributors
[Jay Rajasekharan](https://www.linkedin.com/in/jayrajasekharan?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
[Ehsan M. Kermani](https://www.linkedin.com/in/ehsanmkermani?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
[Slobodan Markovic](https://www.linkedin.com/in/slobodan-markovic?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ------------ | ---------------------------------- |
| 2021-01-05 | 2.4 | LakshmiHolla | Changed markdown for outliers |
| 2020-11-12 | 2.3 | LakshmiHolla | Added example code for outliers |
| 2020-11-03 | 2.2 | LakshmiHolla | Changed URL of excel file |
| 2020-09-29 | 2.1 | LakshmiHolla | Made fix to a boxplot label |
| 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
| github_jupyter |
# Time-energy fit
3ML allows the possibility to model a time-varying source by explicitly fitting the time-dependent part of the model. Let's see this with an example.
First we import what we need:
```
from threeML import *
import matplotlib.pyplot as plt
from jupyterthemes import jtplot
%matplotlib inline
jtplot.style(context="talk", fscale=1, ticks=True, grid=False)
plt.style.use("mike")
```
## Generating the datasets
Then we generate a simulated dataset for a source with a cutoff powerlaw spectrum with a constant photon index and cutoff but with a normalization that changes with time following a powerlaw:
```
def generate_one(K, ax):
    """Simulate one observed spectrum for a given normalization.

    Generates a synthetic dataset from a cutoff-powerlaw spectrum with
    normalization *K* and a constant 30% uncertainty, and overlays the
    generating function on the axes *ax*.

    Returns the tuple ``(x, y, y_err)`` of simulated energies, fluxes,
    and flux errors.
    """
    # Spectrum that generates the fake data
    source = Cutoff_powerlaw()
    source.K = K

    # Log-spaced "observation" energies between 1 and 100
    energies = np.logspace(0, 2, 50)

    # Simulate the dataset with a constant 30% error bar
    simulator = XYLike.from_function(
        "sim_data", function=source, x=energies, yerr=0.3 * source(energies)
    )

    fluxes = simulator.y
    flux_errors = simulator.yerr

    # Show the true generating function for reference
    ax.loglog(energies, source(energies))

    return energies, fluxes, flux_errors
```
These are the times at which the simulated spectra have been observed
```
time_tags = np.array([1.0, 2.0, 5.0, 10.0])
```
This describes the time-varying normalization. If everything works as it should, we should recover from the fit a normalization of 0.23 and an index of -3.5 for the time law.
```
normalizations = 0.23 * time_tags ** (-3.5)
```
Now that we have a simple function to create the datasets, let's build them.
```
fig, ax = plt.subplots()
datasets = [generate_one(k, ax) for k in normalizations]
ax.set_xlabel("Energy")
ax.set_ylabel("Flux")
```
## Setup the model
Now set up the fit and fit it. First we need to tell 3ML that we are going to fit using an independent variable (time in this case). We init it to 1.0 and set the unit to seconds.
```
time = IndependentVariable("time", 1.0, u.s)
```
Then we load the data that we have generated, tagging them with their time of observation.
```
plugins = []
for i, dataset in enumerate(datasets):
x, y, y_err = dataset
xyl = XYLike("data%i" % i, x, y, y_err)
# This is the important part: we need to tag the instance of the
# plugin so that 3ML will know that this instance corresponds to the
# given tag (a time coordinate in this case). If instead of giving
# one time coordinate we give two time coordinates, then 3ML will
# take the average of the model between the two time coordinates
# (computed as the integral of the model between t1 and t2 divided
# by t2-t1)
xyl.tag = (time, time_tags[i])
# To access the tag we have just set we can use:
independent_variable, start, end = xyl.tag
# NOTE: xyl.tag will return 3 things: the independent variable, the start and the
# end. If like in this case you do not specify an end when assigning the tag, end
# will be None
plugins.append(xyl)
```
Generate the datalist as usual
```
data = DataList(*plugins)
```
Now let's generate the spectral model, in this case a point source with a cutoff powerlaw spectrum.
```
spectrum = Cutoff_powerlaw()
src = PointSource("test", ra=0.0, dec=0.0, spectral_shape=spectrum)
model = Model(src)
```
Now we need to tell 3ML that we are going to use the time coordinate to specify a time dependence for some of the parameters of the model.
```
model.add_independent_variable(time)
```
Now let's specify the time-dependence (a powerlaw) for the normalization of the powerlaw spectrum.
```
time_po = Powerlaw()
time_po.K.bounds = (0.01, 1000)
```
Link the normalization of the cutoff powerlaw spectrum with time through the time law we have just generated.
```
model.link(spectrum.K, time, time_po)
model
```
## Performing the fit
```
jl = JointLikelihood(model, data)
best_fit_parameters, likelihood_values = jl.fit()
for p in plugins:
p.plot(x_scale='log', y_scale='log');
```
| github_jupyter |
# Credit Risk Resampling Techniques
```
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import ClusterCentroids
from imblearn.combine import SMOTEENN
```
# Read the CSV into DataFrame
```
# Load the data
file_path = Path('Resources/lending_data.csv')
df = pd.read_csv(file_path)
df.head()
# Binary encoding 'homeowner' on in text
df = pd.get_dummies(df, columns=["homeowner"])
df.head()
```
# Split the Data into Training and Testing
```
# Create our features
X = df.drop(columns='loan_status')
# Create our target
y = df['loan_status']
X.describe()
# Check the balance of our target values
y.value_counts()
# Create X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
X_train.shape
Counter(y_train)
```
## Data Pre-Processing
Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).
```
# Create the StandardScaler instance
scaler = StandardScaler()
# Fit the Standard Scaler with the training data
# When fitting scaling functions, only train on the training dataset
X_scaler = scaler.fit(X_train)
# Scale the training and testing data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
# Simple Logistic Regression
```
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train, y_train)
Counter(y_train)
# Calculated the balanced accuracy score
y_pred = model.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
```
# Oversampling
In this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
2. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Print the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
### Naive Random Oversampling
```
# Resample the training data with the RandomOversampler
ros = RandomOverSampler(random_state=1)
X_resampled, y_resampled = ros.fit_resample(X_train, y_train)
# View the count of target classes with Counter
Counter(y_resampled)
X_resampled.head()
# Train the Logistic Regression model using the resampled data
model_Random = LogisticRegression(solver='lbfgs', random_state=1)
model_Random.fit(X_resampled, y_resampled)
X_resampled.shape
# Calculated the balanced accuracy score
y_pred_nro = model_Random.predict(X_test)
balanced_accuracy_score(y_test, y_pred_nro)
# Display the confusion matrix
cm_nro = confusion_matrix(y_test, y_pred_nro)
cm_nro
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred_nro))
```
### SMOTE Oversampling
```
# Resample the training data with SMOTE
X_resampled_smote, y_resampled_smote = SMOTE(random_state=1, sampling_strategy=1.0).fit_resample(
X_train, y_train)
# View the count of target classes with Counter
Counter(y_resampled_smote)
X_resampled_smote.head()
# Train the Logistic Regression model using the resampled data
model_smote = LogisticRegression(solver='lbfgs', random_state=1)
model_smote.fit(X_resampled_smote, y_resampled_smote)
# Calculated the balanced accuracy score
y_pred_smote = model_smote.predict(X_test)
balanced_accuracy_score(y_test, y_pred_smote)
# Display the confusion matrix
cm_smote = confusion_matrix(y_test, y_pred_smote)
cm_smote
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred_smote))
```
# Undersampling
In this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
2. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Display the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
```
# Resample the data using the ClusterCentroids resampler
cc = ClusterCentroids(random_state=1)
X_resampled_cc, y_resampled_cc = cc.fit_resample(X_train, y_train)
# View the count of target classes with Counter
Counter(y_resampled_cc)
# Train the Logistic Regression model using the resampled data
model_cc = LogisticRegression(solver='lbfgs', random_state=1)
model_cc.fit(X_resampled_cc, y_resampled_cc)
# Calculate the balanced accuracy score
y_pred_cc = model_cc.predict(X_test)
cm_cc = confusion_matrix(y_test, y_pred_cc)
# Display the confusion matrix
balanced_accuracy_score(y_test, y_pred_cc)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred_cc))
```
# Combination (Over and Under) Sampling
In this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
2. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Display the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
```
# Resample the training data with SMOTEENN
sm = SMOTEENN(random_state=1)
X_resampled_sm, y_resampled_sm = sm.fit_resample(X_train, y_train)
# View the count of target classes with Counter
Counter(y_resampled_sm)
# Train the Logistic Regression model using the resampled data
model_sm = LogisticRegression(solver='lbfgs', random_state=1)
model_sm.fit(X_resampled_sm, y_resampled_sm)
# Calculate the balanced accuracy score
y_pred_sm = model_sm.predict(X_test)
cm_sm = confusion_matrix(y_test, y_pred_sm)
# Display the confusion matrix
balanced_accuracy_score(y_test, y_pred_sm)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred_sm))
```
# Final Questions
1. Which model had the best balanced accuracy score?
SMOTE oversampling and naive random oversampling, which share the same balanced accuracy score, performed best and appear to be the best models.
2. Which model had the best recall score?
Almost all the models have similar recall scores.
3. Which model had the best geometric mean score?
The geometric mean scores are also very similar across all of the models.
P.S.: I consulted the instructor about this similarity.
| github_jupyter |
## Predicting Survival on the Titanic
### History
Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.
### Assignment:
Build a Machine Learning Pipeline, to engineer the features in the data set and predict who is more likely to Survive the catastrophe.
Follow the Jupyter notebook below, and complete the missing bits of code, to achieve each one of the pipeline steps.
```
import re
# to handle datasets
import pandas as pd
import numpy as np
# for visualization
import matplotlib.pyplot as plt
# to divide train and test set
from sklearn.model_selection import train_test_split
# feature scaling
from sklearn.preprocessing import StandardScaler
# to build the models
from sklearn.linear_model import LogisticRegression
# to evaluate the models
from sklearn.metrics import accuracy_score, roc_auc_score
# to persist the model and the scaler
import joblib
# to visualise al the columns in the dataframe
pd.pandas.set_option('display.max_columns', None)
```
## Prepare the data set
```
# load the data - it is available open source and online
data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')
# display data
data.head()
# replace interrogation marks by NaN values
data = data.replace('?', np.nan)
# retain only the first cabin if more than
# 1 are available per passenger
def get_first_cabin(row):
    """Return the first cabin code from a whitespace-separated cabin string.

    Some passengers have several cabins listed (e.g. ``"C85 C87"``); only
    the first one is retained.  Missing values (NaN floats) and empty
    strings yield ``np.nan``.
    """
    try:
        return row.split()[0]
    except (AttributeError, IndexError):
        # AttributeError: row is NaN (a float, no .split());
        # IndexError: row is an empty/whitespace-only string.
        # Catching only these avoids the original bare `except:`,
        # which silently swallowed every exception.
        return np.nan
data['cabin'] = data['cabin'].apply(get_first_cabin)
# extracts the title (Mr, Ms, etc) from the name variable
def get_title(passenger):
    """Extract a passenger's honorific (Mr, Mrs, Miss, Master) from the name.

    'Mrs' is checked before 'Mr' because 'Mrs' contains 'Mr' as a substring.
    Names without a recognised title map to 'Other'.
    """
    for title in ('Mrs', 'Mr', 'Miss', 'Master'):
        if title in passenger:
            return title
    return 'Other'
data['title'] = data['name'].apply(get_title)
# cast numerical variables as floats
data['fare'] = data['fare'].astype('float')
data['age'] = data['age'].astype('float')
# drop unnecessary variables
data.drop(labels=['name','ticket', 'boat', 'body','home.dest'], axis=1, inplace=True)
# display data
data.head()
# save the data set
data.to_csv('titanic.csv', index=False)
```
## Data Exploration
### Find numerical and categorical variables
```
data = pd.read_csv('titanic.csv')
target = 'survived'
vars_num = [var for var in data.columns if data[var].dtypes != 'O' and data[var].nunique() > 20]
vars_cat = [var for var in data.columns if var not in vars_num and var != 'survived']
print('Number of numerical variables: {}'.format(len(vars_num)))
print('Number of categorical variables: {}'.format(len(vars_cat)))
```
### Find missing values in variables
```
# first in numerical variables
vars_num_with_na = [var for var in vars_num if data[var].isnull().sum() > 0]
print(vars_num_with_na)
# now in categorical variables
vars_cat_with_na = [var for var in vars_cat if data[var].isnull().sum() > 0]
print(vars_cat_with_na)
```
### Determine cardinality of categorical variables
```
data[vars_cat].nunique()
```
### Determine the distribution of numerical variables
```
for var in vars_num:
data[var].hist(bins=30)
plt.ylabel('Number of passengers')
plt.xlabel(var)
plt.title(var)
plt.show()
```
## Separate data into train and test
Use the code below for reproducibility. Don't change it.
```
X_train, X_test, y_train, y_test = train_test_split(
data.drop('survived', axis=1), # predictors
data['survived'], # target
test_size=0.2, # percentage of obs in test set
random_state=0) # seed to ensure reproducibility
X_train.shape, X_test.shape
```
## Feature Engineering
### Extract only the letter (and drop the number) from the variable Cabin
```
X_train['cabin'] = X_train['cabin'].str.replace('\\d', '')
X_test['cabin'] = X_test['cabin'].str.replace('\\d', '')
```
### Fill in Missing data in numerical variables:
- Add a binary missing indicator
- Fill NA in original variable with the median
```
for var in vars_num_with_na:
var_med = X_train[var].median()
X_train[var + '_na'] = np.where(X_train[var].isnull(), 1, 0)
X_test[var + '_na'] = np.where(X_test[var].isnull(), 1, 0)
X_train[var] = X_train[var].fillna(var_med)
X_test[var] = X_test[var].fillna(var_med)
```
### Replace Missing data in categorical variables with the string **Missing**
```
for var in vars_cat_with_na:
X_train[var] = X_train[var].fillna('Missing')
X_test[var] = X_test[var].fillna('Missing')
```
### Remove rare labels in categorical variables
- remove labels present in less than 5 % of the passengers
```
def find_frequent_labels(df, var, rare_perc):
    """Return the categories of column *var* shared by more than
    *rare_perc* (a fraction, e.g. 0.05) of the rows in *df*.

    The denominator is the full row count (NaNs included), matching the
    original behaviour.
    """
    # value_counts() does not mutate df, so no defensive copy is needed
    freq = df[var].value_counts() / len(df)
    return freq[freq > rare_perc].index
for var in vars_cat:
# find the frequent categories
frequent_ls = find_frequent_labels(X_train, var, 0.05)
print(var)
print(frequent_ls)
print()
# replace rare categories by the string "Rare"
X_train[var] = np.where(X_train[var].isin(
frequent_ls), X_train[var], 'Rare')
X_test[var] = np.where(X_test[var].isin(
frequent_ls), X_test[var], 'Rare')
```
### Perform one hot encoding of categorical variables into k-1 binary variables
- k-1, means that if the variable contains 9 different categories, we create 8 different binary variables
- Remember to drop the original categorical variable (the one with the strings) after the encoding
```
X_train = pd.concat([X_train, pd.get_dummies(X_train[vars_cat], drop_first=True)], axis=1).drop(vars_cat, axis=1)
X_test = pd.concat([X_test, pd.get_dummies(X_test[vars_cat], drop_first=True)], axis=1).drop(vars_cat, axis=1)
for var in X_train.columns:
if var not in X_test.columns:
X_test[var] = 0
```
### Scale the variables
- Use the standard scaler from Scikit-learn
```
scl = StandardScaler()
scl.fit(X_train)
X_train = scl.transform(X_train)
X_test = scl.transform(X_test)
```
## Train the Logistic Regression model
- Set the regularization parameter to 0.0005
- Set the seed to 0
```
log_model = LogisticRegression(C=0.0005, random_state=0)
log_model.fit(X_train, y_train)
```
## Make predictions and evaluate model performance
Determine:
- roc-auc
- accuracy
**Important, remember that to determine the accuracy, you need the outcome 0, 1, referring to survived or not. But to determine the roc-auc you need the probability of survival.**
```
test_pred = log_model.predict(X_test)
test_prob = log_model.predict_proba(X_test)
print(roc_auc_score(y_test, test_prob[:, 1]))
print(accuracy_score(y_test, test_pred))
```
That's it! Well done
**Keep this code safe, as we will use this notebook later on, to build production code, in our next assignment!!**
| github_jupyter |
<a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Hodographs</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="https://unidata.github.io/MetPy/latest/_images/sphx_glr_Advanced_Sounding_001.png" alt="Example Skew-T" style="height: 500px;"></div>
### Questions
1. What is a hodograph?
1. How can MetPy plot hodographs?
1. How can the style of the hodographs be modified to encode other information?
### Objectives
1. <a href="#upperairdata">Obtain upper air data</a>
1. <a href="#simpleplot">Make a simple hodograph</a>
1. <a href="#annotate">Annotate the hodograph with wind vectors</a>
1. <a href="#continuous">Color the plot (continuous)</a>
1. <a href="#segmented">Color the plot (segmented)</a>
<a name="upperairdata"></a>
## Obtain upper air data
Just as we learned in the siphon basics and upper air and skew-T notebook, we need to obtain upper air data to plot. Here we request the October 4, 1998 at 00Z sounding for Norman, Oklahoma (OUN). If you need a review on obtaining upper air data, please review those lessons.
```
from datetime import datetime
from metpy.units import pandas_dataframe_to_unit_arrays
from siphon.simplewebservice.wyoming import WyomingUpperAir
df = WyomingUpperAir.request_data(datetime(1998, 10, 4, 0), 'OUN')
data = pandas_dataframe_to_unit_arrays(df)
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="simpleplot"></a>
## Make a Simple Hodograph
The hodograph is a plot of the wind shear in the sounding. It is constructed by drawing the winds as vectors from the origin and connecting the heads of those vectors. MetPy makes this simple!
```
import matplotlib.pyplot as plt
from metpy.plots import Hodograph
%matplotlib inline
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
h = Hodograph(ax, component_range=60.)
h.add_grid(increment=20)
h.plot(data['u_wind'], data['v_wind'], color='tab:red')
```
It's relatively common to not want or need to display the entire sounding on a hodograph. Let's limit these data to the lowest 10km and plot it again.
```
import metpy.calc as mpcalc
from metpy.units import units
_, u_trimmed, v_trimmed, speed_trimmed, height_trimmed = mpcalc.get_layer(data['pressure'], data['u_wind'],
data['v_wind'], data['speed'], data['height'],
heights=data['height'], depth=10 * units.km)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
h = Hodograph(ax, component_range=30.)
h.add_grid(increment=10)
h.plot(u_trimmed, v_trimmed, color='tab:red')
```
<a name="annotate"></a>
## Annotate the hodograph with wind vectors
It may be useful when introducing hodographs to actually show the wind vectors on the plot. The `wind_vectors` method does exactly this. It is often necessary to decimate the wind vectors for the plot to be intelligible.
```
h.wind_vectors(u_trimmed[::3], v_trimmed[::3])
fig
```
We can also set the limits to be asymmetric to better utilize the plot space.
```
ax.set_xlim(-10, 30)
ax.set_ylim(-10, 20)
fig
```
<a name="continuous"></a>
## Color the plot (continuous)
We can color the line on the hodograph by another variable as well. In the simplest case it will be "continuously" colored, changing with the value of the variable such as windspeed.
```
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
h = Hodograph(ax, component_range=30.)
h.add_grid(increment=10)
h.plot_colormapped(u_trimmed, v_trimmed, speed_trimmed)
from metpy.plots import colortables
import numpy as np
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
norm, cmap = colortables.get_with_range('Carbone42', np.min(speed_trimmed), np.max(speed_trimmed))
h = Hodograph(ax, component_range=30.)
h.add_grid(increment=10)
h.plot_colormapped(u_trimmed, v_trimmed, speed_trimmed, cmap=cmap, norm=norm)
```
<a name="segmented"></a>
## Color the plot (segmented)
We can also color the hodograph based on another variable - either continuously or in a segmented way. Here we'll color the hodograph by height above ground level.
```
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
boundaries = np.array([0, 1, 3, 5, 8]) * units.km
colors = ['tab:red', 'tab:green', 'tab:blue', 'tab:olive']
# Since we want to do things in terms of AGL, we need to make AGL heights
agl = height_trimmed - height_trimmed[0]
h = Hodograph(ax, component_range=30.)
h.add_grid(increment=10)
h.plot_colormapped(u_trimmed, v_trimmed, agl, bounds=boundaries, colors=colors)
```
<a href="#top">Top</a>
<hr style="height:2px;">
| github_jupyter |
## Community Detection
In this notebook we will walk through a number of methods for community detection using a simple example dataset.
```
import numpy,pandas
import networkx as nx
import matplotlib.pyplot as plt
import sys
import operator
import itertools
sys.path.append('../utils')
from utils import algorithm_u
from utils import module_degree_zscore,participation_coefficient
%matplotlib inline
```
### Example graph
First let's create a simple graph that has two communities, each of which is fully connected, with one node from each community connected to a node in the other community.
```
G = nx.Graph()
# nodes 1-4 are members of one community, and 5-8 are members of another, with 1 and 5 connected as well
edges=[(1,2),(1,3),(1,4),(1,5),(5,6),(5,7),(5,8),(6,7),(6,8),(7,8),(2,3),(2,4),(3,4)]
G.add_edges_from(edges)
nx.draw_spring(G,with_labels=True,node_color='yellow')
```
### Girvan-Newman method
The Girvan-Newman method is a *divisive* clustering method, meaning that it starts with the full graph and tries to find the best way to divide it into some number of clusters by removing particular edges.
The algorithm was defined by [Girvan & Newman (2002)](http://www.pnas.org/content/99/12/7821) as follows:
1. Calculate the betweenness for all edges in the network.
2. Remove the edge with the highest betweenness.
3. Recalculate betweennesses for all edges affected by the
removal.
4. Repeat from step 2 until no edges remain.
Let's implement this for the example dataset, finding two clusters.
```
# Girvan-Newman divisive clustering: repeatedly remove the edge with the
# highest betweenness until the graph splits into two connected components.
n_clusters=len([i for i in nx.connected_components(G)])
G_tmp=G.copy() # make a copy of the graph to work with
while n_clusters==1:
    # step 1: compute edge betweenness
    eb=nx.edge_betweenness(G_tmp)
    # step 2: remove the edge with highest betweenness
    # find the edge with the largest value of edge betweenness
    ebmax = max(eb.items(), key=operator.itemgetter(1))[0]
    # remove it from the graph
    G_tmp.remove_edges_from([ebmax])
    print('removing edge:',ebmax)
    # steps 3-4: compute the number of connected components to see if we have
    # induced new clusters, and continue looping if not
    n_clusters=len([i for i in nx.connected_components(G_tmp)])
print('found two clusters:')
print([list(i.nodes) for i in nx.connected_component_subgraphs(G_tmp)])
```
### Modularity
One of the most commonly used set of methods for community detection rely upon the concept of *modularity*. Here we will walk through the computation of modularity for a simple graph.
Here is one expression for modularity, from [Fortunato, 2010](https://arxiv.org/pdf/0906.0612.pdf):
$$
Q = \frac{1}{2m}\sum_{ij}(A_{ij} - P_{ij})\delta(C_i,C_j)
$$
where $m$ is the total number of edges in the graph, $A$ is the adjacency matrix, $P_{ij}$ is the expected number of edges between i and j according to the appropriate null model, and $\delta$ is a matrix that denotes whether vertices i and j are within the same community:
$$
\delta(C_i,C_j) = \left\{
\begin{array}{ll}
1\ if\ C_i=C_j\\
0\ if\ C_i \neq C_j\\
\end{array}\right.
$$
To compute modularity for our example graph (which looks to be fairly modular), we first need the adjacency matrix.
```
A = nx.to_numpy_array(G)
print(A)
```
Next we need to generate the $\delta$ matrix denoting whether each pair of vertices is a member of the same community.
```
partition = [1,1,1,1,2,2,2,2]
delta = numpy.zeros((len(partition),len(partition)))
for i in range(len(partition)):
for j in range(len(partition)):
delta[i,j]=int(partition[i]==partition[j])
print(delta)
```
The final thing we need is the expected edge frequency from the null model. In general we want the null model to match the actual graph as closely as possible, except for the clustering. It is common to use a null model in which the degree sequence (i.e. the values of degrees for all nodes) is identical to the real graph; this is a more stringent null model than simply equating the degree distribution. This null model can be written as (Fortunato, 2010):
$$
Q = \frac{1}{2m}\sum_{ij}\bigg(A_{ij} - \frac{k_i k_j}{2m}\bigg)\delta(C_i,C_j)
$$
where $k_i$ is the degree of vertex $i$. Note that this null model will not necessarily give an identical degree sequence on any particular realization, but should be the same on average.
```
m = len(G.edges)
k = [G.degree[i] for i in G.nodes] # degree values
Q=0
for i in range(len(k)):
for j in range(len(k)):
Q += (A[i,j] - (k[i]*k[j])/(2*m))*delta[i,j]
Q = Q/(2*m)
print(Q)
```
We can compare our answer to the one given by the built-in modularity function in NetworkX:
```
assert Q == nx.algorithms.community.quality.modularity(G,[{1,2,3,4},{5,6,7,8}])
```
Now let's examine how modularity varies with the partition. In this case, we can fairly easily compute all 128 possible partitions of the 8 nodes and compute modularity for each. In principle we should see that the modularity value is highest for the true partition.
```
Qvals=numpy.zeros(128)
partitions=[]
# loop through all possible partitions of edges into two communities:
for i,p in enumerate(algorithm_u([1,2,3,4,5,6,7,8],2)):
Qvals[i] = nx.algorithms.community.quality.modularity(G,p)
partitions.append(p)
plt.plot(numpy.sort(Qvals))
print('maximum Q:',numpy.max(Qvals))
print('best partition:',partitions[numpy.argsort(Qvals)[-1:][0]])
```
### Modularity optimization
In general, it's not possible to perform exhaustive computation of modularity for all possible partitions (since the number of possible partitions grows exponentially with the size of the graph), so a number of researchers have developed approximate methods that perform well in finding the partition with the maximum modularity value.
#### Greedy optimization
One approach (proposed initially by [Newman, 2004](https://pdfs.semanticscholar.org/29d4/dfae2807a67a2c66c720b4985cb599c4e245.pdf)) is to perform an *agglomerative* clustering using a ["greedy" algorithm ](https://en.wikipedia.org/wiki/Greedy_algorithm)- that is, an algorithm that makes the best possible choice at each point in the process, akin to climbing a hill by going the steepest upward direction at every point.
In Newman's greedy method, we start with each vertex in its own partition. We then find which combination of partitions would increase modularity the most, and combine those into one.
The implementation here would be far too inefficient to use with real data, but should help make clear how the algorithm works.
```
# create a function to compute modularity more easily
# create a function to compute modularity more easily
def modularity(G, partition):
    """Compute the Newman-Girvan modularity Q of *partition* on graph *G*.

    Parameters
    ----------
    G : networkx.Graph
    partition : sequence of community labels, one per node in G.nodes order

    Returns
    -------
    float : Q = (1/2m) * sum_ij (A_ij - k_i*k_j/(2m)) * delta(C_i, C_j)
    """
    A = nx.to_numpy_array(G)
    m = len(G.edges)
    part = numpy.asarray(partition)
    # delta[i, j] == 1 when nodes i and j share a community label
    delta = (part[:, None] == part[None, :]).astype(float)
    k = numpy.array([G.degree[i] for i in G.nodes], dtype=float)
    # expected edge count under the degree-preserving null model: k_i*k_j/(2m)
    null = numpy.outer(k, k) / (2 * m)
    # vectorized form of the original double loop (same result, O(n^2) in C)
    Q = ((A - null) * delta).sum() / (2 * m)
    return Q
# Greedy agglomerative modularity optimization (Newman, 2004): start with
# every node in its own community and repeatedly perform the merge that
# yields the highest modularity, stopping once Q stops improving.
Qvals=[-numpy.inf]  # best Q values seen so far; seed with -inf
notbest=True
partition=numpy.array([1,2,3,4,5,6,7,8]) # initially assign each node to its own community
while notbest:
    unique_partitions=numpy.unique(partition)
    print('unique partitions:',unique_partitions)
    # loop through all combinations of unique partitions
    modvals={}
    for i in range(len(unique_partitions)):
        for j in range(i+1,len(unique_partitions)):
            if i==j:
                continue
            # tentatively merge community i into community j and score it
            tmp_part=numpy.array(partition)
            tmp_part[tmp_part==unique_partitions[i]]=unique_partitions[j]
            modvals[(unique_partitions[i],unique_partitions[j])]=modularity(G,tmp_part)
    # find the candidate merge with the highest modularity
    modmax = max(modvals.items(), key=operator.itemgetter(1))[0]
    # this method assumes that Q increases monotonically to its maximum
    if modvals[modmax]<numpy.max(numpy.array(Qvals)):
        print('breaking: found best Q value!')
        print(partition)
        notbest=False
    else:
        # commit the best merge and record its modularity
        print('collapsing:',modmax,modvals[modmax])
        partition[partition==modmax[0]]=modmax[1]
        Qvals.append(modvals[modmax])
```
### Spectral clustering
Another common method for community detection is spectral clustering, which uses the eigenvectors of matrices that describe the graph. While we could work with the adjacency matrix, it is more common to use the [Laplacian matrix](https://samidavies.wordpress.com/2016/09/20/whats-up-with-the-graph-laplacian/), which you can think of as describing the flow of some quantity away from any particular node in the graph.
The Laplacian L is defined as:
$$
L = D - A
$$
where $A$ is the adjacency matrix, and $D$ is a diagonal matrix where each entry is the degree of that particular node. Here it is for our example graph:
```
D = numpy.zeros(A.shape)
D[numpy.diag_indices_from(D)]=k
L = D - A
print(L)
```
In general, it's more useful to work with the normalized Laplacian, which normalizes by degree. We can compute this easily using linear algebra:
$$
Ln = D^{-\frac{1}{2}}LD^{-\frac{1}{2}}
$$
```
Ln = numpy.linalg.inv(numpy.sqrt(D)).dot(L).dot(numpy.linalg.inv(numpy.sqrt(D)))
print(Ln)
eig = numpy.linalg.eig(Ln)
def plot_eig(eig):
    """Visualise an eigendecomposition: the eigenvector matrix as an image,
    with the corresponding eigenvalues used as x-axis tick labels.

    *eig* is the (eigenvalues, eigenvectors) pair from numpy.linalg.eig.
    """
    eigenvalues, eigenvectors = eig
    plt.figure(figsize=(8,8))
    plt.imshow(eigenvectors)
    # label each column with its eigenvalue, rounded to two decimals
    tick_positions = list(range(len(eigenvalues)))
    tick_labels = ['%0.2f' % val for val in eigenvalues]
    _ = plt.xticks(tick_positions, tick_labels)
    plt.xlabel('eigenvalue')
    plt.ylabel("vertices")
    plt.title('eigenvectors')
    plt.tight_layout()
```
Note that there is a single zero eigenvalue, which represents the fact that there is a single graph component. Let's see what would happen if we were to add another component:
```
G_bigger = G.copy()
G_bigger.add_edges_from([(9,10),(10,11),(10,12)])
nx.draw_spring(G_bigger)
```
Now let's get the Laplacian matrix (this time using the built-in NetworkX function) and compute its eigenvalues.
```
Ln_bigger = nx.normalized_laplacian_matrix(G_bigger).todense()
eig_bigger=numpy.linalg.eig(Ln_bigger)
eig_bigger[0]
```
Here you can see that there are two zero-valued eigenvalues, reflecting the fact that there are two components.
Now look back at the second column in the eigenvector matrix above, corresponding to the second largest eigenvalue. Let's draw the graph and color the nodes according to the values of this second smallest eigenvalue:
```
nx.draw_spring(G,node_color=eig[1][:,1],cmap='viridis')
```
What this shows is that the eigenvector corresponding to the smallest nonzero eigenvalue of the Laplacian matrix divides the graph by its major communities.
Spectral clustering methods take this approach further by treating each vertex in a metric space defined by the eigenvectors, and then using these to perform various clustering operations (e.g., k-means clustering).
### Infomap
The [infomap algorithm](http://www.mapequation.org/code.html) uses a flow-based model along with concepts from information theory to identify communities in the data. It is based on the idea of a random walk across the network; the fundamental concept is that one can describe a random walk in terms of traversal across communities rather than individual nodes, and an accurate community partition should lead to a compact description of the network.
Let's first simulate a random walk across our example network.
```
import random
# length of random walk
walkLength = 1000
n=1 # start node
edges_visited = []  # (from, to) tuple for each step taken
for k in range(walkLength):
    # list the neighbors of the current node and pick one at random
    e=[i[1] for i in list(G.edges(n))]
    random.shuffle(e)
    edges_visited.append((n,e[0]))
    # move to the chosen neighbor
    n=e[0]
```
Infomap uses a measure of the relative proportion of within-community versus between-community walks (though it does this using an information theoretic framework). We can get a simple idea of how this works by simply looking at how often the random walker switches between communities; the best partition should be the one that results in the smallest number of steps between communities. Let's create a function that can take a random walk and a community partition and tell us the proportion of within-community steps. We can then apply this to all possible partitions of our example graph, in order to see if the true partition indeed results in the greatest proportion of within-community steps.
```
def mean_walktype(edges, partition_list):
    """
    Compute the proportion of within-community steps in a random walk.

    edges is a list of (from, to) tuples of node labels visited during a
    random walk; partition_list is a list of two node collections, as
    returned by algorithm_u.
    """
    # Nodes in the second community; every other node is community 0.
    # Set membership replaces the original O(N^2) delta matrix and also
    # works for arbitrary (non-1..N) node labels.
    second_community = set(partition_list[1])
    # 1.0 for a within-community step, 0.0 for a between-community step
    walktype = [
        float((a in second_community) == (b in second_community))
        for a, b in edges
    ]
    return numpy.mean(walktype)
meanvals=numpy.zeros(128)
partitions=[]
# loop through all possible partitions into two communities:
for i,p in enumerate(algorithm_u([1,2,3,4,5,6,7,8],2)):
meanvals[i] = mean_walktype(edges_visited,p)
partitions.append(p)
```
Confirm that the true partition has the maximum probability of within-community steps
```
partitions[numpy.argmax(meanvals)]
mv=numpy.sort(meanvals)
plt.plot(mv)
# put a line for the value of the true partition
true_p=mean_walktype(edges_visited,[{1,2,3,4},{5,6,7,8}])
plt.plot([0,len(meanvals)],[true_p,true_p],'r',alpha=0.5,linestyle='dotted')
```
### Network hubs
We know that in any group there are some individuals who are better connected than others, and this was evident from the long-tailed degree distribution in the Facebook data. In network science, such high-degree nodes are often referred to as "hubs". Given our characterization of community structure, we can further differentiate between different types of hubs. Some hubs are both highly connected within their own module and to nodes in other modules, which we refer to as *connector hubs*. Other hubs are highly connected, but primarily to other nodes within their own module, which we refer to as *provincial hubs*. These different types of hubs play different roles in network communication, and we will encounter them repeatedly as we discuss network neuroscience research.
A general approach to categorizing different types of hubs was presented by [Guimera and Amaral (2005)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2175124/), known as "cartographic analysis". This involves computing two node-level measures:
- within-module degree Z-score: The number of edges between a node and other nodes within its module, Z-scored across all members of that module.
- participation coefficient: A measure of the squared proportion of edges that are with nodes outside one's own module.
$$P_i = 1 - \sum_{s=1}^{N_M} \left ( \frac{k_{is}}{k_i} \right )^2$$
$k_{is}$ is the number of edges that fall within one's module (i.e. module degree), and $k_i$ is the total number of edges (i.e. degree).
Let's compute these for the example dataset, using functions defined in utils/utils.py
```
mdzs=module_degree_zscore(G, partition)
pc=participation_coefficient(G, partition)
```
Note that in our example network, all nodes are fully connected within their own module, which means that their module degree is all equal and thus the module degree Z-score is zero for nodes.
```
mdzs
```
However, if we look at the participation coefficient we will see that most nodes have a value of zero (since they are only connected to others from their own module), whereas the two nodes that connect the modules have a higher participation coefficient.
```
pc
nx.draw_spring(G,node_color=pc,cmap='viridis')
```
Cartographic analysis generally only works on large networks, since the Z-scoring of module degree requires a relatively large number of values to be computed effectively. Let's generate a large modular network to see this.
To generate such a network, we first create each module using the Barabasi-Albert model, and then we connect the modules by preferentially connecting high-degree nodes across modules.
```
nmods=3
modsize=100
nedges = 20 # for B-A generator
A_full = numpy.zeros((modsize*nmods,modsize*nmods))
partition=numpy.zeros(A_full.shape[0])
for m in range(nmods):
Gtmp=nx.barabasi_albert_graph(modsize,nedges)
partition[m*modsize:(m+1)*modsize]=m
A_full[m*modsize:(m+1)*modsize,m*modsize:(m+1)*modsize]=nx.adjacency_matrix(Gtmp).todense()
```
If we look at the adjacency matrix so far, we will see that there are no connections between modules, and plotting using spring-embedding shows three disconnected components:
```
plt.imshow(A_full)
G_mod=nx.from_numpy_array(A_full)
modularity(G_mod,partition)
plt.figure()
nx.draw_spring(G_mod,node_size=5,alpha=.5)
```
Now let's connect the modules using a second level of preferential attachment - that is, nodes that have higher degree within their own module are also more likely to be connected to another module. First let's confirm that the degree distribution is indeed long-tailed as we would expect:
```
degree = numpy.array([G_mod.degree[i] for i in G_mod.nodes])
_=plt.hist(degree,40)
```
Now we add edges connecting the high-degree nodes:
```
edgeidx=numpy.argsort(degree)[::-1]
n_bw_edges=40
p_edge=0.6
# randomly choose some of the high-degree nodes to be connector hubs
connectorhubs=edgeidx[:n_bw_edges][numpy.random.rand(n_bw_edges)<p_edge]
for i in itertools.combinations(connectorhubs,2):
if not i in G_mod.edges:
G_mod.add_edge(i[0],i[1])
mdzs=module_degree_zscore(G_mod, partition)
pc=participation_coefficient(G_mod, partition)
print(modularity(G_mod,partition))
nx.draw_spring(G_mod,alpha=.7,node_color=mdzs,cmap='viridis',node_size=pc*800)
```
Now let's create the cartographic profile for the network.
```
# put everything into a data frame
cartography=pandas.DataFrame(numpy.hstack((pc[:,numpy.newaxis],mdzs[:,numpy.newaxis])),
columns=['PC','MDZS'],
index=G_mod.nodes)
cartography
plt.figure(figsize=(12,8))
plt.scatter(cartography.PC,cartography.MDZS)
ax=plt.gca()
# print names for certain hubs
for i, txt in enumerate(cartography.index):
if cartography.MDZS[i]>4 or (cartography.MDZS[i]>2.5 and cartography.PC[i]>0.75):
ax.annotate(txt, (cartography.PC[i], cartography.MDZS[i]))
# add lines and anntation for Guimera/Amaral regions
plt.axvline(0.75, color='r',linestyle='dotted')
plt.axvline(0.3, color='r',linestyle='dotted')
plt.axhline(2.5, color='r',linestyle='dotted')
plt.xlabel('Participation coefficient')
plt.ylabel('Module degree z-score')
ymax=9
ax.fill_between([0,0.3], [2.5,2.5],[ymax,ymax], facecolor='green', alpha=0.25)
ax.fill_between([0.3,0.75], [2.5,2.5],[ymax,ymax], facecolor='blue', alpha=0.25)
ax.fill_between([0.75,1.], [2.5,2.5],[ymax,ymax], facecolor='red', alpha=0.25)
plt.annotate('Provincial hubs',[0.1,ymax+0.1])
plt.annotate('Connector hubs',[0.45,ymax+0.1])
plt.annotate('Kinless hubs',[0.825,ymax+0.1])
cartography['hubtype']=0
cartography['hubtype'][(cartography.MDZS>2)&(cartography.PC<=0.3)]=1
cartography['hubtype'][(cartography.MDZS>2)&(cartography.PC>0.3)]=2
plt.figure(figsize=(8,8))
pl=nx.spring_layout(G_mod)
nodelists={}
nx.draw_networkx_edges(G_mod,pos=pl,alpha=0.1)
colors=['black','blue','yellow']
alphas=[0.3,0.9,0.9]
for t in range(3):
nodelists[t]=[i for i in range(cartography.shape[0]) if cartography['hubtype'][i]==t]
nx.draw_networkx_nodes(G_mod,pos=pl,node_size=[(G_mod.degree(i)/6)**2 for i in nodelists[t]],
nodelist=nodelists[t],
node_color=colors[t],alpha=alphas[t])
plt.axis('off')
plt.tight_layout()
```
### Rich clubs
In many real-world networks (including brains) there is a subset of high-degree nodes that are preferentially connected to one another as well, which is referred to as a *rich club* ([van den Heuvel & Sporns, 2011](http://www.jneurosci.org/content/31/44/15775)). The presence of a rich club can be quantified using the rich club coefficent $\phi$, which is computed as follows:
$$
\phi(k) = \frac{2E_{>k}}{N_{>k}(N_{>k} - 1)}
$$
This is the ratio of edges between nodes with degree greater than k to the number of possible edges between those nodes. In general we want to normalize this by comparing the observed value to what one expects on the basis of a matched random network (null model):
$$
\phi_{norm}(k) = \frac{\phi(k)}{\phi_{random}(k)}
$$
We compute this for each level of k and then examine the distribution to see whether it exceeds one. In order to assess the variability across multiple instantiations of the null model, we run it repeatedly to see the distribution of rcc values (this will take a few minutes):
```
def get_rcc(G, maxtries=10):
    """Compute the normalized rich-club coefficient of *G*, retrying when
    the random rewiring used for normalization hits a degenerate case
    (ZeroDivisionError).

    Returns the coefficients as a numpy vector ordered by degree.
    Raises Exception after *maxtries* failed attempts.
    """
    tries = 0
    while True:
        try:
            # bug fix: use the G argument (was hard-coded to the global G_mod)
            rcc = nx.rich_club_coefficient(G, normalized=True, Q=10)
            break
        except ZeroDivisionError:
            tries += 1
            if tries >= maxtries:
                # bug fix: the exception was constructed but never raised,
                # which made this loop spin forever on repeated failures
                raise Exception('Too many tries!')
    # return a vector (ordered by degree) rather than a dict
    idx = numpy.sort(list(rcc.keys()))
    return numpy.array([rcc[i] for i in idx])
nsims=100
rccdata=numpy.zeros((max(degree),nsims))*numpy.nan
for s in range(nsims):
tmp=get_rcc(G_mod)
rccdata[:max(degree),s]=tmp[:max(degree)]
minrcc=numpy.min(rccdata,1)
p=plt.plot(rccdata)
plt.xlabel('degree')
plt.ylabel('rich club coefficient')
rcc_thresh=1.25
plt.plot([0,max(degree)],[rcc_thresh,rcc_thresh])
mindegree=numpy.where(minrcc>rcc_thresh)[0][0]
print('Minimum degree with all RCC> %s:'%rcc_thresh,mindegree)
print('Density:',numpy.mean(degree>mindegree))
```
Plotting the RCC values across simulations shows that the RCC starts to be consistently above 1 around a degree of 40, and exceeds our arbitrary threshold of 1.25 for degrees greater than 59. Let's visualize the network highlighting those vertices:
```
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(G_mod,pos=pl,alpha=0.1)
nx.draw_networkx_nodes(G_mod,pos=pl,node_size=[(G_mod.degree(i)/6)**2 for i in G_mod.nodes],
node_color=(degree>mindegree),alpha=0.9)
plt.axis('off')
plt.tight_layout()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/srijan-singh/machine-learning/blob/main/Regression/Simple%20Regression/Models/Simple_Regression_M1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install -U scikit-learn
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
%matplotlib inline
# Download the fuel-consumption dataset and load it into a DataFrame.
!wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv
df = pd.read_csv("FuelConsumption.csv")
# To print the first five rows of the dataset
df.head()
# To print the stats of the data (number of entries, mean, standard deviation,
# min, 25% / 50% / 75% quantiles, max)
df.describe()
# selecting features and exploring the data
cdf = df[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB', 'CO2EMISSIONS']]
# displaying first 5 rows
cdf.head()
# Plotting histogram of each selected column
cdf.hist()
# Printing the figures
plt.show()
# Scatter plots of each feature against the CO2 emission label
plt.scatter(cdf.FUELCONSUMPTION_COMB, cdf.CO2EMISSIONS, color='blue')
# Labeling x-axis
plt.xlabel("Fuel Consumption Combination")
# Labeling y-axis
plt.ylabel("Emission")
plt.show()
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color="blue")
plt.xlabel("Engine Size")
plt.ylabel("Co2 Emission")
plt.show()
plt.scatter(cdf.CYLINDERS, cdf.CO2EMISSIONS, color = "blue")
plt.xlabel("Cylinders")
plt.ylabel("CO2 Emission")
plt.show()
plt.scatter(cdf.ENGINESIZE, cdf.CYLINDERS, color="blue")
plt.xlabel("Engine Size")
plt.ylabel("Cylinder")
plt.show()
# Distributing the Data: random 80/20 train/test split via a boolean mask
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]
```
#Creating Simple Regression Model
Where feature is Engine Size and label is Co2 Emission
On a linear equation, we can state that:<br>
*y = mx + c* <br>
*Co2 Emission = (Coefficient * Engine Size) + Intercept*
```
# Scatter of the training data: engine size vs CO2 emission.
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine Size")
plt.ylabel("Co2 Emission")
plt.show()
```
### Modeling
```
from sklearn import linear_model
# Fit ordinary least squares: CO2 emission as a linear function of engine size.
regr = linear_model.LinearRegression()
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(train_x, train_y)
# Bug fix: in y = mx + c the slope m is coef_ and the intercept c is
# intercept_; the original print labels had them swapped.
print("Intercept: ", regr.intercept_," (c)")
print('Coefficient: ',regr.coef_," (m)")
```
As mentioned, Coefficient and Intercept in the simple linear regression, are the parameters of the fit line. Given that it is a simple linear regression, with only 2 parameters, and knowing that the parameters are the intercept and slope of the line, sklearn can estimate them directly from our data. Notice that all of the data must be available to traverse and calculate the parameters
```
# Plotting the fitted regression line over the training data
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color="blue")
# y = coef * x + intercept (coef_/intercept_ are 2-D/1-D arrays, hence [0][0]/[0])
plt.plot(train_x, regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')
plt.xlabel("Engine size")
plt.ylabel("Emission")
```
###Evaluation
We compare the actual values and predicted values to calculate the accuracy of a regression model. Evaluation metrics provide a key role in the development of a model, as it provides insight to areas that require improvement.
There are different model evaluation metrics, lets use MSE here to calculate the accuracy of our model based on the test set:
- Mean absolute error: It is the mean of the absolute value of the errors. This is the easiest of the metrics to understand since it’s just average error.
- Mean Squared Error (MSE): Mean Squared Error (MSE) is the mean of the squared error. It’s more popular than Mean absolute error because the focus is geared more towards large errors. This is due to the squared term exponentially increasing larger errors in comparison to smaller ones.
- Root Mean Squared Error (RMSE).
- R-squared is not error, but is a popular metric for accuracy of your model. It represents how close the data are to the fitted regression line. The higher the R-squared, the better the model fits your data. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
```
from sklearn.metrics import r2_score
test_x = np.asanyarray(test[['ENGINESIZE']])
test_y = np.asanyarray(test[["CO2EMISSIONS"]])
# Predict on the held-out test set.
test_y_ = regr.predict(test_x)
print("Mean absolute error: %.2f"% np.mean(np.absolute(test_y_ - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y)**2))
# Bug fix: r2_score expects (y_true, y_pred) in that order; the original
# passed the prediction first, which yields a different (wrong) score.
print("R2-score: %.2f" % r2_score(test_y, test_y_))
```
#User Interface
```
# Ask the user for an engine size and predict its CO2 emission with the
# fitted model (predict expects a 2-D array, hence the nested list).
users_engn_siz = np.asanyarray([[float(input("Engine Size: "))]])
prediction = regr.predict(users_engn_siz)
print("Co2 Emission:",prediction[0][0])
```
| github_jupyter |
# Preprocessing data
```
import json
import numpy as np
import csv
import sys
# Spanish -> English country-name mapping, used to translate names from the
# INE source file into the names used by the map data.
dictCountries={
"Alemania":"Germany",
"Austria":"Austria",
"Bélgica":"Belgium",
"Bulgaria":"Bulgaria",
"Chipre":"Cyprus",
"Croacia":"Croatia",
"Dinamarca":"Denmark",
"Eslovenia":"Slovenia",
"Estonia":"Estonia",
"Finlandia":"Finland",
"Francia":"France",
"Grecia":"Greece",
"Holanda":"Holland",
"Hungría":"Hungary",
"Irlanda":"Ireland",
"Italia":"Italy",
"Letonia":"Latvia",
"Lituania":"Lithuania",
"Luxemburgo":"Luxembourg",
"Malta":"Malta",
"Polonia":"Poland",
"Portugal":"Portugal",
"Reino Unido":"United Kingdom",
"República Checa":"Czech Rep.",
"República Eslovaca":"Slovakia",
"Rumanía":"Romania",
"Suecia":"Sweden",
"Federación de Rusia":"Russia",
"Noruega":"Norway",
"Serbia":"Serbia",
"Suiza":"Switzerland",
"Ucrania":"Ukraine"}
# Inverse mapping: English -> Spanish.
invdictCountries = {v: k for k, v in dictCountries.items()}
#Data from: Instituto Nacional de Estadística www.ine.es
f = open("./sources/info.txt", "r")
reader = csv.reader(f)
dic = {}
for row in reader:
    # Assumed row layout: [country name (Spanish), 2013 value, 2016 value]
    # -- TODO confirm against the source file.
    name=row[0]
    # Strip thousands separators ('.') before parsing.
    row[1]=row[1].replace(".","")
    row[2]=row[2].replace(".","")
    # Non-numeric entries (missing data) default to 0.
    if row[1].isnumeric():
        ret2013=int(row[1])
    else:
        ret2013=0
    if row[2].isnumeric():
        ret2016=int(row[2])
    else:
        ret2016=0
    # Keep only countries we can translate to an English map name.
    if name in dictCountries:
        entry = {}
        entry['r2k13']= ret2013
        entry['r2k16']= ret2016
        dic[dictCountries[name]]=entry
f.close()
# Persist the parsed data as JSON keyed by English country name.
f = open("./sources/info.json", "w")
f.write(json.dumps(dic))
f.close()
```
# Creating the map
```
import geopandas as gpd
import json
from collections import OrderedDict
from shapely.geometry import Polygon, mapping
from bokeh.models import GeoJSONDataSource, LinearColorMapper, LogColorMapper,ColorBar,LogTicker, AdaptiveTicker
from bokeh.io import show
from bokeh.plotting import figure, output_file
import bokeh.io
bokeh.io.output_notebook()
from bokeh.models import (
ColumnDataSource,
HoverTool,
LogColorMapper
)
from bokeh.palettes import Viridis6 as palette
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9)
from bokeh.plotting import figure
# Lookup of Bokeh's 9-color standard palettes by name, preserving order so a
# UI widget can list them consistently.
standard_palettes = OrderedDict([("Blues9", Blues9), ("BrBG9", BrBG9),
                                 ("BuGn9", BuGn9), ("BuPu9", BuPu9),
                                 ("GnBu9", GnBu9), ("Greens9", Greens9),
                                 ("Greys9", Greys9), ("OrRd9", OrRd9),
                                 ("Oranges9", Oranges9), ("PRGn9", PRGn9),
                                 ("PiYG9", PiYG9), ("PuBu9", PuBu9),
                                 ("PuBuGn9", PuBuGn9), ("PuOr9", PuOr9),
                                 ("PuRd9", PuRd9), ("Purples9", Purples9),
                                 ("RdBu9", RdBu9), ("RdGy9", RdGy9),
                                 ("RdPu9", RdPu9), ("RdYlBu9", RdYlBu9),
                                 ("RdYlGn9", RdYlGn9), ("Reds9", Reds9),
                                 ("Spectral9", Spectral9), ("YlGn9", YlGn9),
                                 ("YlGnBu9", YlGnBu9), ("YlOrBr9", YlOrBr9),
                                 ("YlOrRd9", YlOrRd9)])
#obtain countries shapes
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
#print(world)
# NOTE(review): despite the name, this selects only Spain's shape -- confirm
# whether the full set of European countries was intended.
europe = (world.loc[world['name'] == 'Spain'])
cities = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))
print(cities)
cities.plot()
```
| github_jupyter |
```
import holoviews as hv
hv.extension('bokeh')
hv.opts.defaults(hv.opts.Curve(width=500),
hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))
import numpy as np
import scipy.signal
import scipy.fft
from IPython.display import Audio
```
# Diseño de sistemas y filtros IIR
Un filtro FIR de buena calidad puede requerir una gran cantidad de coeficientes
Es posible implementar filtros más eficientes usando **recursividad**. Esta es la base de los filtros de respuesta al impulso infinita o IIR que veremos en esta lección
## Definición de un sistema IIR
Generalizando el sistema FIR para incluir versiones pasadas de la salida y asumiendo $a[0] = 1$ llegamos a
$$
\begin{align}
y[n] &= b[0] x[n] + b[1] x[n-1] + b[2] x[n-2] + \ldots + b[L] x[n-L] \nonumber \\
& - a[1] y[n-1] - a[2] y[n-2] - \ldots - a[M] y[n-M] \nonumber \\
&= \sum_{l=0}^{L} b[l] x[n-l] - \sum_{m=1}^{M} a[m] y[n-m] \nonumber \\
\sum_{m=0}^{M} a[m] y[n-m] &= \sum_{l=0}^{L} b[l] x[n-l] \nonumber \\
(a * y)[n] &= (b * x)[n], \nonumber
\end{align}
$$
es decir dos convoluciones discretas que definen una **ecuación de diferencias**
Este tipo de sistema se conoce como
- sistema *infinite impulse response* (IIR)
- sistema *auto-regressive moving average* (ARMA)
- autoregresivo de orden M: incluye valores pasados de la salida
- media movil de orden L+1: pondera el valor presente y pasados de la entrada
Podemos ver el sistema IIR como una generalización del sistema FIR. El caso particular del sistema FIR se recupera si
$a[m] = 0$ para $m=[1, \ldots, M]$
### Respuesta en frecuencia del sistema IIR
Aplicando la transformada de Fourier convertimos las convoluciones en multiplicaciones y encontramos la respuesta en frecuencia como
$$
\begin{align}
\text{DFT}_N[(a * y)[n]] &= \text{DFT}_N[(b * x)[n]] \nonumber \\
A[k] Y[k] &= B[k] X[k] \nonumber \\
H[k] = \frac{Y[k]}{X[k]} &= \frac{B[k]}{A[k]} = \frac{ \sum_{l=0}^L b[l]e^{-j \frac{2\pi}{N} nl} }{ \sum_{m=0}^M a[m]e^{-j \frac{2\pi}{N} mk}} \nonumber
\end{align}
$$
que existe siempre que $A[k] \neq 0$.
La respuesta en frecuencia también suele expresarse como
$$
H[k] = K \frac{ \prod_{l=1}^L (e^{j \frac{2\pi}{N} k}- \beta[l]) }{ \prod_{m=1}^M (e^{j \frac{2\pi}{N} k}- \alpha[m])}
$$
donde
- $K$ se llama **ganancia**
- las raices del polinomio del numerador $\beta$ se llaman conjuntamente **ceros**
- las raices del polinomio del denominador $\alpha$ se llaman conjuntamente **polos**
### Ejemplo de respuesta al impulso de un sistema IIR
Consideremos el siguiente sistema IIR
$$
\begin{align}
y[n] &= (1-\gamma) x[n] + \gamma y[n-1] \nonumber \\
y[n] - \gamma y[n-1] &= (1-\gamma) x[n] \nonumber
\end{align}
$$
Los coeficientes del sistema son
$a[0] = 1$, $a[1] = -\gamma$ y $b[0] = (1-\gamma)$
Es decir que es AR de orden 1 y MA de orden 1
¿Cúal es su respuesta al impulso? Asumiendo $y[n]=0, n<0$, tenemos que
$$
\begin{matrix}
n & \delta[n] & y[n] \\
-2 & 0 & 0 \\
-1 & 0 & 0 \\
0 & 1 & (1-\gamma) \\
1 & 0 & \gamma(1-\gamma) \\
2 & 0 & \gamma^2(1-\gamma) \\
3 & 0 & \gamma^3(1-\gamma) \\
4 & 0 & \gamma^4(1-\gamma) \\
\end{matrix}
$$
¿Cómo cambia la respuesta al impulso con distintos valores de $\gamma$? ¿Qué pasa si $\gamma \geq 1$?
Respondamos estas preguntas visualizando la respuesta al impulso de este sistema con la función `scipy.signal.dimpulse`
```
# Gamma values to try: includes sign-alternating (gamma < 0) and unstable
# (|gamma| >= 1) configurations.
gamma = [-1.5, -1, -0.5, 0.5, 1., 1.5]
p = []
for g in gamma:
    # Impulse response of y[n] = (1-g) x[n] + g y[n-1] over 30 samples,
    # via the transfer-function triple (b, a, dt).
    t, y = scipy.signal.dimpulse(([1-g, 0], [1,-g], 1), x0=0, n=30)
    p.append(hv.Curve((t, y[0][:, 0]), label=f"gamma={g}"))
hv.Layout(p).cols(3).opts(hv.opts.Curve(width=250, height=200, axiswise=True))
```
De las figuras podemos ver que:
- Para $\gamma < 0$ (primera fila) los coeficientes del sistema son alternantes en signo
- Para $|\gamma| < 1$ los coeficientes del sistema tienden a cero
- Para $|\gamma| > 1$ los coeficientes del sistema divergen y tienden a infinito
:::{warning}
A diferencia de un sistema FIR, el sistema IIR puede tener configuraciones inestables en que los coeficientes crecen o decrecen infinitamente
:::
Por otro lado consideremos el sistema anterior y asumamos que $|\gamma|<1$, desenrollando tenemos que
$$
\begin{align}
y[0] &= (1-\gamma) x[0] \nonumber \\
y[1] &= (1-\gamma) (x[1] + \gamma x[0]) \nonumber \\
y[2] &= (1-\gamma) (x[2] + \gamma x[1] + \gamma^2 x[0]) \nonumber \\
y[3] &= (1-\gamma) (x[3] + \gamma x[2] + \gamma^2 x[1] + \gamma^3 x[0]) \nonumber \\
y[4] &= (1-\gamma) (x[4] + \gamma x[3] + \gamma^2 x[2] + \gamma^3 x[1] + \gamma^4 x[0]) \nonumber \\
y[5] &= \ldots \nonumber
\end{align}
$$
:::{note}
Con un sistema IIR de pocos coeficientes podemos representar un sistema FIR considerablemente más grande
:::
En el ejemplo anterior, si escogemos $\gamma$ tal que $\gamma^{20 }\approx 0$ entonces aproximamos un sistema FIR de orden 20 con tan sólo 3 coeficientes
### Ejemplo de respuesta en frecuencia de un sistema IIR
Para el sistema del ejemplo anterior su respuesta en frecuencia es
$$
\begin{align}
Y[k] &= (1-\gamma) X[k] + \gamma Y[k] e^{-j \frac{2\pi}{N} k} \nonumber \\
H[k] = \frac{Y[k]}{X[k]} &= \frac{1-\gamma}{1 - \gamma e^{-j \frac{2\pi}{N} k} } \nonumber
\end{align}
$$
que en notación de polos y ceros se escribe como
$$
H[k] = (1-\gamma)\frac{e^{j \frac{2\pi}{N} k} - 0}{e^{j \frac{2\pi}{N} k} - \gamma }
$$
es decir que tiene un cero en $0$, un polo en $\gamma$ y una ganancia de $(1-\gamma)$
Para entender mejor este sistema estudiemos la magnitud de $|H[k]|$ para $\gamma < 1$
$$
\begin{align}
| H[k]| &= \frac{|1-\gamma|}{|1 - \gamma e^{-j \frac{2\pi}{N} k}|} \nonumber \\
&= \frac{1-\gamma}{\sqrt{1 - 2\gamma \cos(\frac{2\pi}{N} k) + \gamma^2}} \nonumber
\end{align}
$$
¿Cómo se ve $|H[k]|$? ¿Qué función cumple este sistema?
```
# Normalized frequency grid in [-0.48, 0.48].
k = np.arange(-24, 25)/50
# Closed-form magnitude response |H[k]| of the first-order low-pass for a
# given gamma (derived in the markdown above).
Hk = lambda gamma, k : (1-gamma)/np.sqrt(1 - 2*gamma*np.cos(2.0*np.pi*k) + gamma**2)
p = []
for gamma in [0.25, 0.5, 0.75]:
    p.append(hv.Curve((k, Hk(gamma, k)), 'Frecuencia', 'Respuesta', label=f'gamma={gamma}'))
hv.Overlay(p)
```
:::{note}
Este sistema atenua las frecuencias altas, es decir que actua como un filtro pasa bajos
:::
## Diseño de filtros IIR simples
Los filtros IIR más simples son los de un polo y un cero, es decir filtros de primer orden
$$
H[k] = \frac{b[0] + b[1] e^{-j \frac{2\pi}{N} k}}{1 + a[1] e^{-j \frac{2\pi}{N} k}} = K\frac{e^{j \frac{2\pi}{N} k} - \beta}{e^{j \frac{2\pi}{N} k} - \alpha }
$$
donde podemos reconocer
- $b[0]=K$
- $\beta = - b[1] \cdot K$
- $\alpha=-a[1]$
Definimos la frecuencia de corte $f_c$ como aquella frecuencia en la que el filtro alcanza una atenuación de 0.7 (-3 dB). Haciendo la equivalencia con el ejemplo anterior tenemos que $\gamma = e^{-2\pi f_c}$
### Receta para un filtro pasa bajo IIR con frecuencia de corte $f_c$
Asignamos
- $b[0] = 1 - e^{-2\pi f_c}$
- $b[1] = 0$
- $a[1] = -e^{-2\pi f_c}$
Lo que resulta en la siguiente respuesta en frecuencia
$$
H[k] = \frac{1-e^{-2\pi f_c}}{1 - e^{-2\pi f_c} e^{-j \frac{2\pi}{N} k}} = (1-e^{-2\pi f_c}) \frac{(e^{j \frac{2\pi}{N} k}- 0)}{(e^{j \frac{2\pi}{N} k} - e^{-2\pi f_c} )}
$$
Es decir un cero en $0$, un polo en $e^{-2\pi f_c}$ y ganancia $1-e^{-2\pi f_c}$
### Receta para un filtro pasa alto IIR con frecuencia de corte $f_c$
Asignamos
- $b[0] = (1 + e^{-2\pi f_c})/2$
- $b[1] = -(1 + e^{-2\pi f_c})/2$
- $a[1] = -e^{-2\pi f_c}$
Lo que resulta en la siguiente respuesta en frecuencia
$$
H[k] = \frac{1+e^{-2\pi f_c}}{2} \frac{(e^{j \frac{2\pi}{N} k} - 1)}{(e^{j \frac{2\pi}{N} k} - e^{-2\pi f_c})}
$$
Es decir un cero en $1$, un polo en $e^{-2\pi f_c}$ y ganancia $\frac{1+e^{-2\pi f_c}}{2}$
### Aplicar un filtro a una señal con scipy
Para filtrar una señal unidimensional con un filtro IIR (sin variar la fase de la señal) podemos utilizar la función
```python
scipy.signal.filtfilt(b, # Coeficientes del numerador
a, # Coeficientes del denominador
x, # Señal a filtrar
...
)
```
Los siguientes ejemplos muestran un señal de tipo pulso rectangular filtrada con sistemas IIR de primer orden pasa bajo y pasa-alto diseñados con las recetas mostradas anteriormente
```
# Test signal: rectangular pulse train (square wave with 30% duty cycle).
n = np.arange(0, 500)
x = 0.5 + 0.5*scipy.signal.square((n)/(2.*np.pi*5), duty=0.3)
def iir_low_pass(signal, fc):
    """Zero-phase first-order IIR low-pass filter.

    The pole is placed at exp(-2*pi*fc) so that fc is the normalized
    cutoff frequency; filtfilt applies the filter forward and backward,
    preserving the signal's phase.
    """
    pole = np.exp(-2.0 * np.pi * fc)
    numerator = [1.0 - pole, 0.0]
    denominator = [1.0, -pole]
    return scipy.signal.filtfilt(numerator, denominator, signal)
# Apply the low-pass with several cutoff frequencies and overlay the results.
y = {}
for fc in [0.05, 0.02, 0.01]:
    y[fc] = iir_low_pass(x, fc)
px = hv.Curve((n, x))
py = []
for fc, y_ in y.items():
    py.append(hv.Curve((n, y_), label=f'fc={fc}'))
hv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))
def iir_high_pass(signal, fc):
    """Zero-phase first-order IIR high-pass filter.

    Places a zero at 1 (DC) and a pole at exp(-2*pi*fc), with gain
    (1 + pole)/2; filtfilt runs the filter forward and backward so the
    phase of the signal is preserved.
    """
    pole = np.exp(-2.0 * np.pi * fc)
    gain = (1.0 + pole) / 2
    return scipy.signal.filtfilt([gain, -gain], [1.0, -pole], signal)
# Apply the high-pass with several cutoff frequencies and overlay the results.
y = {}
for fc in [0.01, 0.02, 0.05]:
    y[fc] = iir_high_pass(x, fc)
px = hv.Curve((n, x))
py = []
for fc, y_ in y.items():
    py.append(hv.Curve((n, y_), label=f'fc={fc}'))
hv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))
```
:::{note}
El filtro pasa-bajos suaviza los cambios de los pulsos rectangulares. El filtro pasa-altos elimina las zonas constantes y resalta los cambios de la señal.
:::
## Diseño de filtros IIR de segundo orden
Los filtros IIR de segundo orden o **biquad** tienen dos polos y dos ceros.
Su respuesta en frecuencia es
$$
H[k] = \frac{b[0] + b[1] W_N^k + b[2] W_N^{2k}}{1 + a[1] W_N^k + a[2] W_N^{2k}} = K \frac{(W_N^{-k} - \beta_1) (W_N^{-k} - \beta_2)}{(W_N^{-k} - \alpha_1)(W_N^{-k} - \alpha_2)},
$$
donde $W_N = e^{-j \frac{2 \pi}{N}}$ y la relación entreo coeficientes y polos/ceros es:
$$
b[0] = K, \quad b[1] = -K (\beta_1 + \beta_2), \quad b[2]= K \beta_1\beta_2
$$
$$
a[1] = - (\alpha_1 + \alpha_2), \quad a[2]=\alpha_1 \alpha_2
$$
Con arquitecturas de segundo orden se pueden crear filtros pasabanda y rechaza banda
## Diseño de filtros IIR de orden mayor
Para crear los coeficientes de filtro IIR de orden mayor podemos usar la función
```python
scipy.signal.iirfilter(N, # Orden del filtro
Wn, # Frecuencias de corte (normalizadas en [0,1])
fs, # Frecuencia de muestreo
btype='bandpass', # Tipo de filtro: 'bandpass', 'lowpass', 'highpass', 'bandstop'
ftype='butter', # Familia del filtro: 'butter', 'ellip', 'cheby1', 'cheby2', 'bessel'
output='ba', # Retornar coeficientes
...
)
```
El filtro Butterworth es óptimo en el sentido de tener la banda de paso lo más plana posible.
Otros filtros se diseñaron con otras consideraciones.
Los filtros IIR digitales están basados en los filtros IIR analógicos.
Observe como al aumentar el orden el filtro pasabajo IIR comienza a cortar de forma más abrupta
```
# Butterworth low-pass filters of increasing order: higher order gives a
# sharper transition around the cutoff frequency (Wn=0.2).
Hk = {}
for order in [1, 2, 5, 20]:
    b, a = scipy.signal.iirfilter(N=order, Wn=0.2, fs=1,
                                  ftype='butter', btype='lowpass', output='ba')
    # Magnitude of the frequency response for this order.
    freq, response = scipy.signal.freqz(b, a, fs=1)
    Hk[order] = np.abs(response)
p = []
for order, response in Hk.items():
    p.append(hv.Curve((freq, response), 'Frecuencia', 'Respuesta', label=f'orden={order}'))
hv.Overlay(p)
```
## Comparación de la respuesta en frecuencia de filtros FIR e IIR del orden equivalente
Comparemos la respuesta en frecuencia de un filtro IIR y otro FIR ambos pasa-bajo con 20 coeficientes
```
# Compare a 20-tap FIR low-pass against an order-9 IIR low-pass (10 + 10
# coefficients) with the same cutoff frequency.
Fs = 1
fc = 0.25
h = scipy.signal.firwin(numtaps=20, cutoff=fc, pass_zero=True, window='hann', fs=Fs)
b, a = scipy.signal.iirfilter(N=9, Wn=fc, fs=Fs, ftype='butter', btype='lowpass')
# Show that both designs use an equivalent number of coefficients.
display(len(h), len(b)+len(a))
freq_fir, response_fir = scipy.signal.freqz(h, 1, fs=Fs)
freq_iir, response_iir = scipy.signal.freqz(b, a, fs=Fs)
p1 = hv.Curve((freq_fir, np.abs(response_fir)), 'Frecuencia', 'Respuesta', label='FIR')
p2 = hv.Curve((freq_iir, np.abs(response_iir)), 'Frecuencia', 'Respuesta', label='IIR')
hv.Overlay([p1, p2])*hv.VLine(fc).opts(color='k', alpha=0.5)
```
La linea negra marca la ubicación de la frecuencia de corte
:::{note}
El filtro IIR es mucho más abrupto, es decir filtra mejor, que el filtro FIR equivalente
:::
Una desventaja del filtro IIR es que por definición introduce una desfase no constante en la señal de salida
```
# Group delay (per-frequency phase delay) of the FIR vs IIR filters, to
# illustrate the IIR filter's non-constant phase response.
freq_fir, delay_fir = scipy.signal.group_delay(system=(h, 1), fs=Fs)
freq_iir, delay_iir = scipy.signal.group_delay(system=(b, a), fs=Fs)
p1 = hv.Curve((freq_fir, delay_fir), 'Frecuencia', 'Desfase', label='FIR')
p2 = hv.Curve((freq_iir, delay_iir), 'Frecuencia', 'Desfase', label='IIR')
hv.Overlay([p1, p2])*hv.VLine(fc).opts(color='k', alpha=0.5)
```
¿Cómo se ve una señal filtrada donde se preserva la fase versus una donde no se preserva la fase?
Consideremos la señal rectangular anterior y apliquemos un filtro pasa-bajo IIR de orden 1
Esta vez compararemos el filtro con la función `scipy.signal.lfilter` y la función `scipy.signal.filtfilt`. La primera no preserva la fase mientras que la segunda si lo hace
```
# Compare phase-preserving vs non-phase-preserving filtering of a pulse train
# with a first-order Butterworth low-pass.
Fs = 1
fc = 0.01
n = np.arange(0, 500)
x = 0.5 + 0.5*scipy.signal.square((n)/(2.*np.pi*5), duty=0.3)
b, a = scipy.signal.iirfilter(N=1, Wn=fc, fs=Fs, ftype='butter', btype='lowpass')
# lfilter: causal (one-pass) filtering, does NOT preserve phase
y_lfilter = scipy.signal.lfilter(b, a, x)
# filtfilt: forward-backward filtering, preserves phase
y_filtfilt = scipy.signal.filtfilt(b, a, x)
px = hv.Curve((n, x), 'Tiempo', 'Entrada')
py = []
py.append(hv.Curve((n, y_filtfilt), 'Tiempo', 'Salida', label=f'Fase constante'))
py.append(hv.Curve((n, y_lfilter), 'Tiempo', 'Salida', label=f'Fase no constante'))
hv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))
```
:::{note}
En el caso donde no se preserva la fase podemos notar que la señal de salida está desplazada con respecto a la original. Además los cambios tienen una transición asimétrica
:::
La función `scipy.signal.filtfilt` "arregla" el problema del desfase filtrando la señal dos veces. La primera vez se filtra hacia adelante en el tiempo y la segunda vez hacia atrás. Por ende no se puede aplicar en un escenario de tipo *streaming* donde los datos van llegando de forma causal.
En una aplicación causal donde se necesite preservar la fase debemos usar un filtro FIR.
## Apéndice: Efectos de audio con filtros IIR
El siguiente ejemplo muestra como implementar el conocido filtro <a href="https://en.wikipedia.org/wiki/Wah-wah_(music)">Wah-wah</a> usando un sistema IIR
Este es un filtro pasabanda modulado con ancho de pasada fijo $f_b$ [Hz] y una frecuencia central variable $f_c$ [Hz], donde La frecuencia central se modula con una onda lenta
Se modela como el siguiente sistema **IIR**
$$
H[k] = \frac{(1+c)W_N^{2k} -(1+c) }{W_N^{2k} + d(1-c)W_N^k -c}
$$
donde
$$
d=-\cos(2\pi f_c/f_s)
$$
y
$$
c = \frac{\tan(\pi f_b/f_s) -1}{\tan(2\pi f_b /f_s)+1}
$$
Veamos como modifica este filtro una señal de audio
```
import librosa
data, fs = librosa.load("../../data/DPSAU.ogg")
Audio(data, rate=fs)
data_wah = []
# Filter state carried across windows (second-order filter -> 2 state values).
zi = np.zeros(shape=(2,))
# Fixed filter parameters: bandwidth fb [Hz] and window length Nw [samples].
fb, Nw = 200, 5
c = (np.tan(np.pi*fb/fs) - 1.)/(np.tan(2*np.pi*fb/fs) +1)
# Filter the signal window by window while slowly sweeping the band-pass
# center frequency fc to create the wah-wah effect.
for k in range(len(data)//Nw):
    # Center frequency for this window: oscillates between 500 and 2500 Hz.
    fc = 500 + 2000*(np.cos(2.0*np.pi*k*30./fs) +1)/2
    d = -np.cos(2*np.pi*fc/fs)
    # Band-pass filter coefficients for the current center frequency.
    b, a = [(1+c), 0, -(1+c)], [1, d*(1-c), -c]
    # Filter this window, using the previous window's final state (zi) as
    # the initial conditions so there are no discontinuities at the seams.
    data2, zi = scipy.signal.lfilter(b, a, data[k*Nw:(k+1)*Nw], zi=zi)
    # Collect the filtered window.
    data_wah.append(data2)
Audio(np.hstack(data_wah), rate=int(fs))
```
Si quieres profundizar en el tema de los filtros IIR aplicados a efectos de audio recomiendo: https://www.ee.columbia.edu/~ronw/adst-spring2010/lectures/lecture2.pdf
| github_jupyter |
# Assignment 2. Programming Intelligent Agents
MTY - A01152534 - Jorge Antonio Ayala Urbina
MTY - Datos Ale
MTY - A01037093 - Miguel Angel Cruz Gomez
```
from agents import *
import random
# Create things
# Treasure1 thing: gathered only while holding the reusable tool (H)
class T(Thing):
    pass
# Treasure2 thing: gathered only while holding a disposable tool (h)
class t(Thing):
    pass
#Reusable tool thing: kept after use
class H(Thing):
    pass
#Disposable tool thing
class h(Thing):
    pass
#Wall thing: blocks agent movement
class w(Thing):
    pass
#In this environment, the agent can see it all (fully observable)
class IslandDay(Environment):
    """A fully observable 6x6 island grid.

    Things on the island: T/t treasures, H (reusable) and h (disposable)
    tools, and w walls. The Hunter agent moves around collecting tools and
    treasures; the episode ends when no agent is alive.
    """

    #As the environment is fully observable, the percept contains everything
    #found in the environment.
    def percept(self, agent):
        """Return [all things, things held by agent, things at agent's cell].

        As a side effect, prints an ASCII rendering of the 6x6 board.
        """
        in_existence = self.things
        in_posession = agent.holding
        at_position = self.list_things_at(agent.location)
        perception = [in_existence, in_posession, at_position]

        def rowgenerator(self, rownumber):
            # Print one board row bracketed by the row number: '-' for empty
            # or agent-only cells, otherwise the letter of the first
            # non-agent thing in the cell (T, t, H, h, or X for a wall).
            rnum = rownumber
            chars = [rnum]
            for cell in range(0, 6):
                if len(self.list_things_at((rownumber, cell), tclass = Thing)) != 0:
                    for thing in self.list_things_at((rownumber, cell), tclass = Thing):
                        if (isinstance(thing,Agent) and len(self.list_things_at((rownumber, cell), tclass = Thing)) == 1):
                            chars.append('-')
                            break
                        elif not isinstance(thing, Agent):
                            if isinstance(thing, T):
                                chars.append('T')
                                break
                            elif isinstance(thing, t):
                                chars.append('t')
                                break
                            elif isinstance(thing, H):
                                chars.append('H')
                                break
                            elif isinstance(thing, h):
                                chars.append('h')
                                break
                            elif isinstance(thing, w):
                                chars.append('X')
                                break
                else:
                    chars.append('-')
            chars.append(rnum)
            print('{} {} {} {} {} {} {} {}'.format(chars[0], chars[1], chars[2], chars[3], chars[4], chars[5], chars[6], chars[7]))

        print("\ 0 1 2 3 4 5 /")
        rowgenerator(self, 0)
        rowgenerator(self, 1)
        rowgenerator(self, 2)
        rowgenerator(self, 3)
        rowgenerator(self, 4)
        rowgenerator(self, 5)
        print("/ 0 1 2 3 4 5 \\")
        return perception

    def execute_action(self, agent, action):
        """Apply `action` for `agent`, updating the environment state."""
        if action == "Move":
            # Candidate cells: stay put, plus any in-bounds neighbor cell
            # not blocked by a wall; one is chosen uniformly at random.
            location = agent.location
            possMoves = [location]
            moveUp = (location[0],location[1]+1)
            moveDown = (location[0],location[1]-1)
            moveRight = (location[0]+1,location[1])
            moveLeft = (location[0]-1,location[1])
            if (moveUp[1] < 6 and len(self.list_things_at(moveUp, tclass=w)) == 0):
                possMoves.append(moveUp)
            if (moveDown[1] >= 0 and len(self.list_things_at(moveDown, tclass=w)) == 0):
                possMoves.append(moveDown)
            if (moveRight[0] < 6 and len(self.list_things_at(moveRight, tclass=w)) == 0):
                possMoves.append(moveRight)
            if (moveLeft[0] >= 0 and len(self.list_things_at(moveLeft, tclass=w)) == 0):
                possMoves.append(moveLeft)
            agent.move(random.choice(possMoves))
        # Pick-up actions: the agent method appends the object to the
        # agent's inventory; the environment then removes it from the world.
        elif action == "Greuse":
            items = self.list_things_at(agent.location, tclass = H)
            if len(items) != 0:
                agent.Greuse(items[0])
                self.delete_thing(items[0])
        elif action == "Gdispos":
            items = self.list_things_at(agent.location, tclass = h)
            if len(items) != 0:
                agent.Gdispos(items[0])
                self.delete_thing(items[0])
        elif action == "GTreas1":
            # A type-1 treasure can only be gathered while holding a
            # reusable tool (H).
            items = self.list_things_at(agent.location, tclass = T)
            holding = agent.get_held_things(tclass = H)
            if (len(items) != 0 and len(holding) != 0):
                agent.GTreas1(items[0])
                self.delete_thing(items[0])
        elif action == "GTreas2":
            # Bug fix: the original reused `holding` from the GTreas1 branch
            # (a NameError when GTreas2 runs first) and so checked the wrong
            # tool. A type-2 treasure needs a disposable tool (h).
            items = self.list_things_at(agent.location, tclass = t)
            holding = agent.get_held_things(tclass = h)
            if (len(items) != 0 and len(holding) != 0):
                agent.GTreas2(items[0])
                self.delete_thing(items[0])
        else:
            print('Not moving')

    def is_done(self):
        """The episode is over when no agent is alive."""
        dead_agents = not any(agent.is_alive() for agent in self.agents)
        return dead_agents

    def step(self):
        """One step: collect each live agent's action from its program,
        execute all actions, then apply exogenous changes."""
        if not self.is_done():
            actions = []
            for agent in self.agents:
                if agent.alive:
                    actions.append(agent.program(self.percept(agent)))
                else:
                    actions.append("")
            for (agent, action) in zip(self.agents, actions):
                self.execute_action(agent, action)
            self.exogenous_change()

    def run(self, steps=1000):
        "Run the Environment for given number of time steps."
        for step in range(steps):
            if self.is_done():
                return
            self.step()
class Hunter(Agent):
    """The treasure-hunting agent: moves, picks up tools and treasures.

    Performance starts at 50, drops 1 per move, and rises by 20 / 40 when
    a type-1 / type-2 treasure is gathered.
    """
    # NOTE(review): this assigns to the Agent *class*, so every Agent shares
    # the starting performance of 50 -- confirm before refactoring into an
    # instance attribute set in __init__.
    Agent.performance = 50

    def move(self, direc):
        """Move to the adjacent cell `direc`; each move costs 1 performance."""
        self.performance -= 1
        prevLoc = self.location
        self.location = (direc[0],direc[1])
        print('Hunter: Moved from {} to {}'.format(prevLoc,self.location))

    def get_held_things(self, tclass=Thing):
        """Return the things held by the agent, filtered by class."""
        return [thing for thing in self.holding
                if isinstance(thing, tclass)]

    #If Greuse is called, the thing is appended to self.holding
    def Greuse(self, thing):
        """Pick up a reusable tool (H). Returns True on success."""
        if isinstance(thing, H):
            print("Hunter: Grabbed reusable tool at {}.".format(self.location))
            self.holding.append(thing)
            return True
        return False

    def Gdispos(self, thing):
        """Pick up a disposable tool (h). Returns True on success."""
        if isinstance(thing, h):
            print("Hunter: Grabbed disposable tool at {}.".format(self.location))
            self.holding.append(thing)
            return True
        return False

    def GTreas1(self, thing):
        """Gather a type-1 treasure (T): +20 performance."""
        if isinstance(thing, T):
            print("Hunter: Grabbed a Treasure1 at {}.".format(self.location))
            self.holding.append(thing)
            self.performance +=20
            print("Hey! my performance is: {}".format(self.performance))
            return True
        return False

    #Removes disposable tool after use
    def GTreas2(self, thing):
        """Gather a type-2 treasure (t): +40 performance; consumes one
        disposable tool (h) from the inventory."""
        if isinstance(thing, t):
            print("Hunter: Grabbed a Treasure2 at {}.".format(self.location))
            self.holding.append(thing)
            self.performance += 40
            print("Hey! my performance is: {}".format(self.performance))
            # Bug fix: the disposable tool is an `h`; the original matched
            # `t` and therefore discarded the treasure it had just gathered
            # instead of the tool.
            for tool in self.holding:
                if isinstance(tool, h):
                    self.holding.remove(tool)
                    print("Disposable tool has been lost")
                    break
            return True
        return False
def interpret_input(percept):
    """Map a percept to an action string for the simple-reflex hunter.

    `percept` is [things in the environment, things the agent holds,
    things at the agent's location] as built by IslandDay.percept; the
    first element of the location list is the agent itself.

    Returns one of 'Greuse', 'Gdispos', 'GTreas1', 'GTreas2', 'Move', 'NoOp'.
    """
    in_environment = percept[0]
    in_agent = percept[1]
    in_location = percept[2]
    # Which tools does the agent currently hold?
    agent_has_H = any(isinstance(thing, H) for thing in in_agent)
    agent_has_h = any(isinstance(thing, h) for thing in in_agent)
    # Which relevant things still exist, either in the world or in hand?
    # (The original also computed an unused `collectables_exist` flag that
    # contained a duplicated isinstance(thing, t) check where h was meant;
    # that dead code was removed.)
    T_exists = any(isinstance(thing, T) for thing in in_environment)
    t_exists = any(isinstance(thing, t) for thing in in_environment)
    H_exists = agent_has_H or any(isinstance(thing, H) for thing in in_environment)
    h_exists = agent_has_h or any(isinstance(thing, h) for thing in in_environment)
    if len(in_location) != 1:
        # Something besides the agent shares this cell; react to it.
        found = in_location[1]
        if isinstance(found, H):
            print("Hey! I found a Greuse")
            return 'Greuse'
        elif isinstance(found, h):
            print("Hey! I found a Gdispos at: {}".format(in_location[0].location))
            return 'Gdispos'
        elif (isinstance(found, T) and agent_has_H):
            print("Hey! I found a Treasure")
            return 'GTreas1'
        elif (isinstance(found, t) and agent_has_h):
            print("Hey! I found a treasure")
            return 'GTreas2'
        elif isinstance(found, T):
            print("Can't pick up T since I have no H, so I'm moving ")
            return 'Move'
        elif isinstance(found, t):
            print("Can't pick up t since I have no h, so I'm moving ")
            return 'Move'
        else:
            # (typo "Did'nt" in the original output message fixed)
            print("Didn't move")
            return 'NoOp'
    # Alone on the cell: keep exploring only while some treasure is still
    # gatherable (a treasure and its matching tool both exist).
    if ((T_exists and H_exists) or (t_exists and h_exists)):
        return 'Move'
    return 'NoOp'
class Rules():
    """A trivial condition-action rule: stores an action name and matches
    states that are equal to it."""

    def __init__(self, action = ''):
        # The action string this rule fires for (empty by default).
        self.action = action

    def matches(self, a_state):
        """Return True when `a_state` equals this rule's action string."""
        return a_state == self.action
# One Rules instance per possible action; the simple-reflex program returns
# the rule whose action string matches interpret_input's output.
Explore = Rules("Move")
ReusableTool = Rules("Greuse")
DisposableTool = Rules("Gdispos")
Treasure1 = Rules("GTreas1")
Treasure2 = Rules("GTreas2")
NothingToDo = Rules("NoOp")
rules = [Explore, ReusableTool, DisposableTool, Treasure1, Treasure2, NothingToDo]
# print("Explore:")
# print(Explore)
# print("Greuse:")
# Build the island and populate it with the hunter, tools and treasures.
island1 = IslandDay()
hunter_bob = Hunter(SimpleReflexAgentProgram(rules, interpret_input))
H1 = H()
T1 = T()
T2 = T()
h1 = h()
h2 = h()
t1 = t()
t2 = t()
island1.add_thing(hunter_bob, (0,0))
island1.add_thing(H1, (1,1))
island1.add_thing(T1, (1,3))
island1.add_thing(T2, (1,4))
island1.add_thing(h1, (1,2))
island1.add_thing(h2, (0,2))
island1.add_thing(t1, (4,1))
island1.add_thing(t2, (4,2))
island1.things
# Run for up to 30 steps, then inspect the remaining things and inventory.
island1.run(30)
island1.things
hunter_bob.holding
# Scratch check: isinstance on a list is False for int.
myList = ['',2,3,'']
isinstance(myList,int)
```
| github_jupyter |
# The Schrödinger equation
#### Let's have some serious fun!
We'll look at the solutions of the Schrödinger equation for a harmonic potential.
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from math import pi as Pi
import matplotlib.pyplot as plt
from scipy import (inf, integrate)
import seaborn as sns
sns.set()
```
### Prelude: Hermite's Polynomials
Hermite's Polynomials are a subset of polynomials that will help us construct solutions of the Schrödinger equation.
#### Modelling polynomials
Some object-oriented Python programming with polynomials. We represent an arbitrary polynomial
$$
P(x) = \sum_{n=0}^{N} p_n \cdot x^n
$$
unambiguously by its coefficients $p_n$, i.e. an array of real numbers of length $N+1$. Apart from the algebraic operators we also define the multiplication with x as ```mulx()``` and the differentiation as ```d_dx()```.
```
class Polynomial():
    """A polynomial represented by its coefficient array.

    ``p[i]`` is the coefficient of x**i.  Supports +, -, scalar *,
    multiplication by x (``mulx``), differentiation (``d_dx``) and
    evaluation via ``__call__`` (works for real, complex and array x).
    """

    def __init__(self, array=(0,)):
        # Tuple default avoids the shared-mutable-default pitfall;
        # np.array() copies, so the caller's data is never aliased.
        self.p = np.array(array)

    def mulx(self):
        """Return x * P(x): shift all coefficients up by one power."""
        return Polynomial(np.insert(self.p, 0, 0))

    def d_dx(self):
        """Return dP/dx; the derivative of a constant is the zero polynomial
        (previously an empty coefficient array, which broke __call__)."""
        if len(self.p) <= 1:
            return Polynomial([0])
        return Polynomial([i * self.p[i] for i in range(1, len(self.p))])

    def __eq__(self, other):
        # Pad to a common length so e.g. [1, 2] == [1, 2, 0] compares equal
        # (the old code handed numpy mismatched shapes and failed).
        l = max(len(self.p), len(other.p))
        return np.equal(Polynomial.pad(self.p, l),
                        Polynomial.pad(other.p, l)).all()

    def __rmul__(self, number):
        return Polynomial(number * self.p)

    def __sub__(self, other):
        l = max(len(self.p), len(other.p))
        return Polynomial(Polynomial.pad(self.p, l) - Polynomial.pad(other.p, l))

    def __add__(self, other):
        l = max(len(self.p), len(other.p))
        return Polynomial(Polynomial.pad(self.p, l) + Polynomial.pad(other.p, l))

    def __call__(self, x):
        """Evaluate the polynomial at x (scalar, complex, or numpy array)."""
        return np.sum([self.p[i] * x**i for i in range(len(self.p))], axis=0)

    @staticmethod
    def pad(array, l):
        """Right-pad *array* with zeros to length *l* (error if l is smaller)."""
        if len(array) == l:
            return array
        if len(array) > l:
            raise ValueError("can't pad to lower dimension")
        return np.append(array, np.zeros(l - len(array)))

    @staticmethod
    def mono_repr(c, i):
        """Pretty-print one monomial c*x^i; '' when the coefficient is 0."""
        if c == 0:
            return ''
        if i == 0:
            return str(int(c))
        elif i == 1:
            return "{}x".format(int(c))
        else:
            if c == 1:
                return "x^{}".format(i)
            else:
                return "{}x^{}".format(int(c), i)

    def __repr__(self):
        # Highest power first, zero terms omitted.
        return " + ".join(
            np.flipud([Polynomial.mono_repr(self.p[i], i)
                       for i in range(len(self.p)) if self.p[i] != 0]))
```
#### The Hermite Polynomial generator
Now, Hermite's polynomials are a special subset of all polynomials, defined e.g. by a recursion relation:
From [Wikipedia](https://en.wikipedia.org/wiki/Hermite_polynomials) (if not good memories), we know that
$$
H_n(x) = (2x-\frac{d}{dx})^n \cdot 1
$$
generates the *physicist's* Hermite polynomials. We define our python generator in a recursive fashion returning Polynomial instances
$$
H_n(x) = (2x-\frac{d}{dx}) \cdot H_{n-1}
$$
```
def H(n):
    """Return the n-th physicist's Hermite polynomial as a Polynomial.

    Built iteratively from H_0 = 1 via H_k = (2x - d/dx) * H_{k-1};
    the iterative form gives the same result as the old recursion but
    cannot hit Python's recursion limit for large n.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("Not defined for negative n")
    h = Polynomial([1])  # H_0
    for _ in range(n):
        h = 2 * h.mulx() - h.d_dx()
    return h
```
Note that we can evaluate the polynomial at any (even complex) x.
```
# Evaluate H_3 at a real and at a complex argument (H_3(x) = 8x^3 - 12x).
H_3 = H(3)
H_3, H_3(1), H_3(1+2j)
```
The Hermite polynomials have the special properties:
$$
x \cdot H_\nu(x) = \frac{1}{2} H_{\nu+1}(x) + \nu \cdot H_{\nu-1}(x)
$$
$$
\frac{d}{dx}H_\nu(x) = 2 \nu \cdot H_{\nu-1}(x)
$$
which we can verify using our implementation for the first 10 polynomials ($\nu = {1..9}$):
```
# Recurrence check: x*H_n = H_{n+1}/2 + n*H_{n-1}, for n = 1..9.
[H(n).mulx() == 0.5 * H(n + 1) + n * H(n - 1) for n in range(1, 10)]
# Derivative check: H_n' = 2n * H_{n-1}, for n = 1..9.
[H(n).d_dx() == 2 * n * H(n - 1) for n in range(1, 10)]
```
---
### The time-dependent Schrödinger equation
$$
i\hbar \frac{\partial \Psi(x,t)}{\partial t} =
\mathcal{H}\Psi(x,t) =
E\Psi(x,t)
$$
This is the Schrödinger equation. Now, with the time-independent Hamilton operator $\mathcal{H}$ for a particle with mass m and the harmonic potential given by $ V(x)=\frac{1}{2}m\omega^2 x^2$ looks like
$$
\mathcal{H} = -\frac{\hbar^2}{2m}\frac{\partial^2}{\partial x^2} + \frac{1}{2}m\omega^2 x^2
$$
we can separate the variables $x$ and $t$ like so:
$$
\Psi(x, t) = \psi(x) \cdot \varphi(t)
$$
and solve both
$$
i\hbar \frac{\partial \varphi(t)}{\partial t} = E \cdot \varphi(t)
$$
and
$$
[-\frac{\hbar^2}{2m}\frac{\partial^2}{\partial x^2} + \frac{1}{2}m\omega^2 x^2] \cdot \psi(x) = E \psi(x)
$$
separately.
A neat trick to get rid of the physical constants is rescaling:
$$\xi = \sqrt{\frac{m \omega}{\hbar}} \cdot x$$
with which you can easily check by yourself that the Schrödinger equation becomes:
$$
[ -\frac{\partial^2}{\partial \xi^2} + \xi^2 - \frac{2E}{\hbar \omega}] \cdot \psi(\xi) = 0
$$
where we postulate the boundary conditions for a constrained particle as
$$
\psi(-\infty) = \psi(\infty) = 0
$$
The so-called stationary solutions of the equation in $x$ form an ortho-normal eigenbasis of the Hilbert space of bounded functions $\psi_{\nu}(\xi)$ with eigenvalues $E_{\nu}=\hbar \omega (\nu + \frac{1}{2})$. And although we're not interested in the boring (yawn!) stationary solutions, we'll use this eigenbasis to construct an analytical function that obeys the time-dependent Schrödinger equation.
With the above eigenvalues we finally arrive at the following concise representation of the time-independent Schrödinger equation.
$$
[ -\frac{\partial^2}{\partial \xi^2} + \xi^2 - (2\nu+1)] \cdot \psi(\xi) = 0
$$
### Functions as eigenvectors
The solutions of this equation span a vector space, a so-called Hilbert space. That means we can define addition, multiplication by a number and even an inner product on these functions. When we look at functions as vectors in a Hilbert space, then the Schrödinger equation can as well be considered an eigenvalue problem. We'll provide the solutions without proof.
The eigenfunctions are composed of the Hermite polynomials and a gaussian:
$$
\psi_\nu(\xi) = \frac{1}{\sqrt{2^\nu \cdot \nu! \cdot \sqrt{\pi}}} \cdot H_\nu(\xi) \cdot
e^{-\frac{\xi^2}{2}}
$$
$$
\varphi_\nu(t) = e^{-i (\nu+\frac{1}{2}) t}
$$
Thus arriving at the full solution of the time-dependent Schrödinger equation as
$$
\psi_\nu(\xi, t) = \frac{1}{\sqrt{2^\nu \cdot \nu! \cdot \sqrt{\pi}}} \cdot H_\nu(\xi) \cdot
e^{-\frac{\xi^2}{2}-i(\nu+\frac{1}{2}) t}
$$
These solutions are called stationary because they rotate in the complex plane keeping their shape. That means that for every x the value of $\psi_\nu(x)$ rotates in the complex plane with exactly the same *frequency* as any other. Please note that we have clandestinely scaled the time t such that it *swallowed* the physical constants. For our purpose, namely visualizing the non-stationary solutions of the Schrödinger equation, this does not make a difference.
---
Defining the normalization factor $A_\nu$ as
$$
A_\nu = \frac{1}{\sqrt{2^\nu \cdot \nu! \cdot \sqrt{\pi}}}
$$
we visualize these stationary solutions such that we get an idea what they look like:
```
def A(nu):
    """Normalization factor A_nu = 1 / sqrt(2**nu * nu! * sqrt(pi))."""
    norm_sq = (1 << nu) * math.factorial(nu) * math.sqrt(math.pi)
    return 1 / math.sqrt(norm_sq)
def psi(nu):
    """Return the nu-th stationary wave function psi_nu(x) as a callable."""
    amplitude = A(nu)
    hermite = H(nu)

    def _psi(x):
        # Normalized Hermite polynomial times gaussian envelope.
        return amplitude * hermite(x) * np.exp(-x * x / 2)

    return _psi
# Plot the first four stationary solutions on a common grid.
N_points=200
x_ = np.linspace(-6, 6, N_points)
plt.plot(x_, psi(0)(x_))
plt.plot(x_, psi(1)(x_))
plt.plot(x_, psi(2)(x_))
plt.plot(x_, psi(3)(x_));
```
---
#### Ortho-normal basis
Let's verify that our $\psi_\nu(\xi)$ form an ortho-normal basis with the inner product $\langle \psi_\mu | \psi_\nu \rangle$, $\mathbb{H} \times \mathbb{H} \rightarrow \mathbb{R}$ defined by
$$
\int_{-\infty}^{\infty} \bar{\psi}_\nu(\xi) \cdot \psi_\mu(\xi) d\xi= \delta^{\mu\nu}
$$
$\bar{\psi}_\nu(\xi)$ being the complex conjugate of $\psi_\nu(\xi)$
```
# Gram matrix of the first five basis functions: numerically integrate
# <psi_mu | psi_nu> over the real line; should print the 5x5 identity.
[[round(integrate.quad(lambda x: psi(mu)(x)*psi(nu)(x), -inf, +inf)[0], 6) for mu in range(5)] for nu in range(5)]
```
You can see that all inner products of two basis functions are zero, apart from the product with itself, which is what the *Kronecker* delta $\delta^{\mu \nu}$ demands.
---
### The fun part: coherent solutions
Now, let's have some fun. As we have just verified, the eigenstates of the Schrödinger equation form an ortho-normal basis of the Hilbert space of functions in one dimension. We expect that one can approximate any other bounded function as a linear combination of the first $N$ eigenfunctions. We'll do that for the following shifted gaussian. Note that it is centered around $x=-3$, so it's not equal to the first basis function.
```
# A ground-state gaussian shifted to x0 = -3: our (non-stationary) test function.
x0=-3
fun=lambda x: psi(0)(x-x0)
#sns.set_style("ticks", {"xtick.major.size": 2, "ytick.major.size": .1})
sns.set()
plt.plot(x_, fun(x_));
```
We compute its coordinates in the Schrödinger eigenbasis simply by projecting it onto the first $N$ eigenfunctions like this
```
# Project the shifted gaussian onto the first N eigenfunctions:
# c_mu = <psi_mu | fun>, computed by numerical quadrature.
N = 15
coords = [integrate.quad(lambda x: psi(mu)(x)*fun(x), -inf, +inf)[0] for mu in range(N)]
coords
```
Calling those coordinates $c_\nu$, we compute
$$
\psi_0(x-x_0) \approx \big[\sum_{\nu=0}^{14} c_\nu \cdot A_\nu H_\nu(x)\big] \cdot e^{-\frac{x^2}{2}}
$$
```
# Reassemble the projection: sum of c_nu * A_nu * H_nu(x), times the gaussian.
pol = sum((coords[nu] * A(nu) * H(nu) for nu in range(N)), Polynomial([0]))

def projection(x):
    return pol(x) * np.exp(-x * x / 2)

plt.plot(x_, projection(x_));
```
What you see is that the 15-dimensional projection of our shifted function into the Schrödinger eigenbasis is a formidable approximation.
It's actually much more than an approximation. You can interpret this function as the wave function of a particle resting (the momentum is zero) at $x=x_0$. Remember there's still the harmonic potential. Thus, in the limit of classical mechanics, we would expect that our particle will slowly accelerate to the right until it *feels* the potential there. Then it would reflect and move all the way back. Lacking friction, we indeed expect that this oscillation continues until eternity.
---
#### Let the clock tick...
Because now we have this function as a linear combination of Schrödinger solutions, we can switch on time and see for ourselves. Under the influence of the time-dependent Schrödinger equation, the fifteen eigenvectors each rotate at their own frequency, determined by the eigenvalue $2\nu+1$
The time-dependent solutions
$$
\psi_\nu(\xi, t) = \frac{1}{\sqrt{2^\nu \cdot \nu! \cdot \sqrt{\pi}}} \cdot H_\nu(\xi) \cdot
e^{-\frac{\xi^2}{2}-i(\nu+\frac{1}{2}) t}
$$
Note that now this function is complex-valued!
```
def psit(nu):
    """Return psi_nu(x, t): the stationary solution with its time phase."""
    amp = A(nu)
    hermite = H(nu)
    omega = nu + .5  # eigenfrequency in our scaled units

    def _psi(x, t):
        # The stationary shape rotates in the complex plane with frequency omega.
        return amp * hermite(x) * np.exp(-x * x / 2) * np.exp(-1j * omega * t)

    return _psi

# Sample value: psi_3 at x=1, t=0.3 (a complex number).
psit(3)(1, .3)
```
---
#### 3-D data
To appreciate the dynamics of a wave function in time we display both the real part and the imaginary part of the complex value of $\psi$.
- The figure's y-axis is our space coordinate $x$
- its z-axis spans the real part of the wave function
- and its x-axis spans the wave function's imaginary part
```
import mpl_toolkits.mplot3d.axes3d as p3
```
We display $\psi_2(x, t) $ at $t=0.5$
```
# Display psi_2(x, t) at t = 0.5 as a 3-D curve:
# y-axis = x, z-axis = Re(psi), x-axis = Im(psi).
x_ = np.linspace(-6,6, N_points)
f = psit(2)(x_, 0.5)
r_f = f.real  # vectorized: f is a complex numpy array
i_f = f.imag
fig = plt.figure(figsize=(12,8))
# fig.gca(projection='3d') was removed in matplotlib 3.6;
# add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')
ax.view_init(30, -15)
ax.set_xlim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xlabel('Imag')
ax.set_ylabel('X')
ax.set_zlabel('Real')
ax.plot(i_f, x_, r_f)
plt.show()
```
As you can see, the function is tilted in the complex plane due to the complex phase $e^{-\frac{5}{2}it}$
---
#### Time-dependent wave functions
Here, we'll create an analytical time-dependent wave function from our set of coordinates in Hilbert space that represent the resting particle at $x_0=-3$
```
def WF(sc):
    """Build a wave function psi(x, t) from Hilbert-space coordinates *sc*.

    Each basis function A_nu * H_nu(x) * exp(-x^2/2) is weighted by its
    coordinate sc[nu] and its time-dependent phase exp(-i(nu+1/2)t).
    """
    def _wf(x, t):
        terms = [sc[nu] * np.exp(-1j * (nu + .5) * t)       # time-dependent coefficient
                 * A(nu) * H(nu)(x) * np.exp(-x * x / 2)    # basis function
                 for nu in range(len(sc))]
        return sum(terms)
    return _wf

particle = WF(coords)
particle(-3, 0)  # a particle resting at x=-3 at time t=0
```
### Animating a Schrödinger particle!
```
%autosave 3600
N_frames = 100
N_Points = 200
XL, XR = -6, 6  # spatial window

def snapshot(N, f, t):
    """Sample wave function *f* at time *t* on N points; return [imag, x, real]."""
    x = np.linspace(XL, XR, N)
    values = f(x, t)
    real_part = np.array([v.real for v in values])
    imag_part = np.array([v.imag for v in values])
    return np.array([imag_part, x, real_part])

def update(num, n_points, n_frames, wave_function, line):
    """FuncAnimation callback: move *line* to frame *num* (t sweeps 0..4*pi)."""
    t = num * 4.0 / n_frames * math.pi
    frame = snapshot(n_points, wave_function, t)
    line.set_data(frame[0], frame[1])
    line.set_3d_properties(frame[2])
    return line
```
Recording the animation will take a couple of seconds. Be patient. It's worth waiting for!
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
fig=plt.figure(figsize=(12,8))
# NOTE(review): on matplotlib >= 3.4, p3.Axes3D(fig) no longer attaches the
# axes to the figure automatically; fig.add_subplot(projection='3d') may be
# required — confirm against the installed matplotlib version.
ax = p3.Axes3D(fig)
# Seed the animated line with the t=0 snapshot of the particle.
initial_data = snapshot(N_points, particle, 0.0)
line = ax.plot(initial_data[0], initial_data[1], initial_data[2])[0]
ax.set_xlim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_ylim(XL, XR)
ax.set_xlabel('Imag')
ax.set_ylabel('X')
ax.set_zlabel('Real')
ax.set_title('Schroedinger particle in action!')
ax.view_init(10, -10)
# update() is invoked once per frame; t sweeps 0..4*pi over N_frames frames.
line_ani = animation.FuncAnimation(
    fig, update, N_frames,
    fargs=(N_Points, N_frames, particle, line),
    interval=200, blit=False)
jshtml = line_ani.to_jshtml()
#Uncomment and run this cell the see the movie. The cell will be so large that the notebook refuses to save. Thus I always comment it out before saving.
#HTML(data=jshtml)
# Uncomment to save your file and serve it elsewhere
#with open("Schroedinger.html", "w") as file:
#    file.write(jshtml)
```
---
### Measuring location and momentum
Measurements in the real world are represented by computing expectation values of the operator associated with the given observable.
#### Angle notation
In the following, we denote eigenfunctions of the Schrödinger equation in angle notation
$$
|\nu \rangle \equiv \psi_\nu(x,t)
$$
In our unit-free notation, and introducing a more concise notation for the partial derivative, the momentum operator $\hat{p}$ is defined by
$$
\hat{p} = -i \partial_x
$$
Operators in our Hilbert space will be written in *hat* notation. You have seen $\hat{p}$ already. The Hamilton operator becomes:
$$
\hat{H} = \hat{p}^2 + \hat{x}^2
$$
Note that we're back to using $x$, but what we really mean is the unit-less $\xi$.
The Schrödinger equation in its eigenbasis looks like
$$
\hat{H} |\nu\rangle = (2\nu+1)|\nu\rangle
$$
The inner product of any two wave functions (not necessarily basisvectors) as defined by the integral over the product of both functions has a neat short notation:
$$
\langle \psi_1 | \psi_2 \rangle
\equiv
\int_{-\infty}^{\infty} \bar{\psi_1}(\xi) \cdot \psi_2(\xi) d\xi
$$
The expectation value of an observable represented by an Operator like e.g. $\hat{p}$, given a particular wave function $\psi$ is defined by
$$
\langle \psi | \hat{p} | \psi \rangle
\equiv
\int_{-\infty}^{\infty} \bar{\psi}(\xi) \cdot (-i\partial_x) \psi(\xi) d\xi
$$
---
#### Dirac's ladder operators
Let us introduce the two *ladder* operators $a$ and $a^\dagger$ as
$$
a \equiv \frac{1}{\sqrt 2} (\hat{x} + i\hat{p})
$$
$$
a^\dagger \equiv \frac{1}{\sqrt 2} (\hat{x} - i\hat{p})
$$
using which we can express $\hat{p}$ and $\hat{x}$ like so:
$$
\hat{p} = \frac{i}{\sqrt 2}(a^\dagger - a)
$$
$$
\hat{x} = \frac{1}{\sqrt 2}(a^\dagger + a)
$$
Then you can convince yourself easily using the properties of the Hermite polynomials:
$$
x \cdot H_\nu(x) = \frac{1}{2} H_{\nu+1}(x) + \nu \cdot H_{\nu-1}(x)
$$
$$
\frac{d}{dx}H_\nu(x) = 2 \nu \cdot H_{\nu-1}(x)
$$
and our solutions of the Schrödinger equations
$$
\psi_\nu(x) = A_\nu \cdot H_\nu(x) \cdot
e^{-\frac{x^2}{2}}
$$
that
$$ a|\nu\rangle = \sqrt{\nu} |\nu-1 \rangle $$
and
$$ a^\dagger|\nu\rangle = \sqrt{\nu+1} |\nu+1 \rangle $$
It should be obvious by now why these operators are called *ladder* operators. They map each basis vector on the next resp. previous basis vector. And this neat property leads to a surprisingly simple method of applying $\hat{p}$ or $\hat{x}$ to arbitrary wave functions.
---
#### Matrix representation
We can compute a matrix representation easily by projecting the result of every
$a|\nu\rangle$ resp. $a^\dagger|\nu\rangle$ onto every eigenvector.
$$
\langle \mu|a|\nu\rangle = \sqrt{\nu}\cdot\langle \mu | \nu-1\rangle = \sqrt{\nu} \cdot \delta^{\mu,\nu-1}
$$
and
$$
\langle \mu|a^\dagger|\nu\rangle = \sqrt{\nu+1}\cdot\langle \mu | \nu+1\rangle = \sqrt{\nu+1} \cdot \delta^{\mu,\nu+1}
$$
In this matrix representation, the ladder operators populate the positions right above or below the diagonal, respectively.
$$
a = \left[
\begin{array}{c c c c c c}
0 & 1 & 0 & 0 & 0 & 0 & \dots \\
0 & 0 & \sqrt{2} & 0 & 0 & 0 & \dots\\
0 & 0 & 0 & \sqrt{3} & 0 & 0 & \dots\\
0 & 0 & 0 & 0 & \sqrt{4} & 0 & \dots\\
0 & 0 & 0 & 0 & 0 & \sqrt{5} & \dots\\
0 & 0 & 0 & 0 & 0 & 0 & \dots \\
\dots
\end{array}
\right]
$$
$$
a^\dagger =
\left[
\begin{array}{c c c c c c}
0 & 0 & 0 & 0 & 0 & 0 & \dots\\
1 & 0 & 0 & 0 & 0 & 0 & \dots\\
0 & \sqrt{2} & 0 & 0 & 0 & 0 & \dots\\
0 & 0 & \sqrt{3} & 0 & 0 & 0 & \dots\\
0 & 0 & 0 & \sqrt{4} & 0 & 0 & \dots\\
0 & 0 & 0 & 0 & \sqrt{5} & 0 & \dots\\
\dots
\end{array}
\right]
$$
which leads to
$$
\hat{p} = \frac{1}{\sqrt{2}} \cdot \left[
\begin{array}{c c c c c c}
0 & -i & 0 & 0 & 0 & 0 & \dots\\
i & 0 & -i\sqrt{2} & 0 & 0 & 0 & \dots\\
0 & i\sqrt{2} & 0 & -i\sqrt{3} & 0 & 0 & \dots\\
0 & 0 & i\sqrt{3} & 0 & -i\sqrt{4} & 0 & \dots\\
0 & 0 & 0 & i\sqrt{4} & 0 & -i\sqrt{5} & \dots\\
0 & 0 & 0 & 0 & i\sqrt{5} & 0 & \dots\\
\dots
\end{array}
\right]
$$
$$
\hat{x} = \frac{1}{\sqrt{2}} \cdot \left[
\begin{array}{c c c c c c}
0 & 1 & 0 & 0 & 0 & 0 & \dots\\
1 & 0 & \sqrt{2} & 0 & 0 & 0 & \dots\\
0 & \sqrt{2} & 0 & \sqrt{3} & 0 & 0 & \dots\\
0 & 0 & \sqrt{3} & 0 & \sqrt{4} & 0 & \dots\\
0 & 0 & 0 & \sqrt{4} & 0 & \sqrt{5} & \dots\\
0 & 0 & 0 & 0 & \sqrt{5} & 0 & \dots\\
\dots
\end{array}
\right]
$$
---
With these matrices we can do all our calculations just like highschool algebra! Let's verify that
$$ a|2\rangle = \sqrt{2} \cdot |1\rangle $$
and
$$ a^\dagger |2\rangle = \sqrt{3} \cdot |3\rangle $$
```
# Truncate the (infinite) operator matrices to 4x4 for display.
N=4 # just so that displaying the matrices doesn't clutter the notebook
```
The ladder operators as numpy arrays:
```
# Ladder operator 'a': sqrt(nu) on the superdiagonal (entry [nu-1, nu]).
a = np.diag(np.sqrt(np.arange(1, N, dtype=float)), k=1)
a
# Its adjoint 'a_d': sqrt(nu) on the subdiagonal (entry [nu, nu-1]).
a_d = np.diag(np.sqrt(np.arange(1, N, dtype=float)), k=-1)
a_d
# |2> as a coordinate vector: expect a|2> = sqrt(2)|1>, a_d|2> = sqrt(3)|3>.
nu2 = np.array([0, 0, 1, 0])
np.matmul(a, nu2), np.matmul(a_d, nu2)
```
Convinced?
---
#### Expectation values
We can do even more exciting stuff with these matrices. Remember our initial wave function from the movie? It was a gaussian located a x=-3, and I claimed that it was at rest. It's about time to prove both.
The expectation value of the location $x$ is defined by
$$
\langle \psi | \hat{x} | \psi \rangle
\equiv
\int_{-\infty}^{\infty} \bar{\psi}(x) \cdot x \cdot \psi(x) dx
$$
```
# Using the 15-dimensional coordinates of our initial wave function in the Hilbert space spun by the
# solutions of the Schrödinger equation with harmonic potential
# Using the 15-dimensional coordinates of our initial wave function in the
# Hilbert space spanned by the solutions of the Schrödinger equation with
# harmonic potential.
c = coords
N = len(coords)
# Rebuild the ladder-operator matrices at the full dimension N (= 15).
a=np.array([[math.sqrt(nu) if mu==nu-1 else 0.0 for nu in range(N)] for mu in range(N)])
a_d=np.array([[math.sqrt(nu+1) if mu==nu+1 else 0.0 for nu in range(N)] for mu in range(N)])
```
Below we calculate
$$
\langle \psi | \hat{x} | \psi \rangle =
\frac{1}{\sqrt{2}} \cdot (\langle \psi | \hat{a} \psi \rangle + \langle \psi | \hat{a}^\dagger \psi \rangle)
= \frac{1}{\sqrt{2}} \cdot (\psi^T \cdot \mathbb{M} \cdot \psi + \psi^T \cdot \mathbb{M}^\dagger \cdot \psi)
$$
where $\psi^T$ is the transposed vector and $\mathbb{M}, \mathbb{M}^\dagger$ are the matrix representations of the ladder operators $a, a^\dagger$.
```
psi=np.array(coords)
# <psi| x_hat |psi> = (1/sqrt(2)) * (psi^T a psi + psi^T a_d psi);
# should come out close to the shift x0 = -3 of the initial gaussian.
1/math.sqrt(2) * (np.matmul(np.matmul(psi.T, a), psi) + np.matmul(np.matmul(psi.T, a_d), psi))
# Transposing is just for visual clarity.
# Actually, Python would understand the matmul operation correctly, anyway.
```
Convinced? That's almost exactly what we expected.
Btw. we could have been smarter by computing the $\hat{x}$ operator first and then compute the expectation value of it: Let's do that also for $\hat{p}$
$\hat{p} = \frac{i}{\sqrt 2}(a^\dagger - a)$ ;
$\hat{x} = \frac{1}{\sqrt 2}(a^\dagger + a)$:
```
# Momentum and position operators from the ladder operators:
# p_hat = i/sqrt(2) * (a_d - a),  x_hat = 1/sqrt(2) * (a_d + a).
p_hat = 1j/math.sqrt(2) * ( a_d - a )
x_hat = 1/math.sqrt(2) * ( a_d + a )
```
$\langle \psi | \hat{p} | \psi \rangle$:
```
np.matmul(np.matmul(psi.T, p_hat), psi)
```
That's almost zero. C'mon, now you are convinced, right?
---
#### Observing location and momentum over time
```
def psi_t(sc, t):
    """Evolve Hilbert-space coordinates *sc* to time *t*.

    Each coefficient picks up its eigenphase exp(-i(nu+1/2)t).  Iterates over
    len(sc) rather than the module-level N, so it works for any coordinate
    vector (the old code raised IndexError when len(sc) < N and silently
    truncated when len(sc) > N).
    """
    return np.array([sc[nu] * np.exp(-1j * (nu + .5) * t) for nu in range(len(sc))])
# Coordinates evolved to t = 0.7 (now complex).
psi_07 = psi_t(psi, 0.7)
psi_07
```
Please note that for complex coefficients we must compute $\langle \psi | $ as the complex conjugate of $| \psi \rangle$
```
# <psi(0.7)| p_hat |psi(0.7)>; the bra side must be complex-conjugated.
np.matmul(np.matmul(np.conj(psi_07).T, p_hat), psi_07)
def p_exp (sc, t):
    # Momentum expectation value at time t; .real drops numerical noise
    # in the imaginary part.
    psit = psi_t(sc, t)
    return np.matmul(np.matmul(np.conj(psit).T, p_hat), psit).real
p_exp(psi, .7)
def x_exp (sc, t):
    # Position expectation value at time t.
    psit = psi_t(sc, t)
    return np.matmul(np.matmul(np.conj(psit).T, x_hat), psit).real
x_exp(psi, np.array(0.7))
# Trace the expectation values over one period: a circle in (x, p) phase space.
t_ = np.linspace(0, 2*math.pi, 100)
xt_ = [x_exp(psi, t) for t in t_]
pt_ = [p_exp(psi, t) for t in t_]
plt.plot(xt_, pt_);
```
Just like in classical mechanics, the expectation values of location and momentum form an ellipse (in our case even a perfect circle) in the phase space spanned by values of $p$ and $x$.
| github_jupyter |
```
%%capture
!pip install python-dp
import syft as sy
duet = sy.join_duet(loopback=True)
# https://github.com/OpenMined/PyDP/blob/dev/examples/Tutorial_1-carrots_demo/carrots_demo.ipynb
# We will not call pydp.xxx directly; instead we call duet.pydp.xxx, which
# runs pydp.xxx on the data-owner side, so it's not necessary to import pydp.
# import pydp
duet.store.pandas
# this will allow us to use pydp like duet.pydp.xxx
sy.load_lib("pydp")
BoundedMean = duet.pydp.algorithms.laplacian.BoundedMean
carrots_eaten_ptr = duet.store["carrots_eaten"]
def private_mean(privacy_budget: float) -> float:
x_ptr = BoundedMean(privacy_budget, 1, 100)
return x_ptr.quick_result(carrots_eaten_ptr).get(
request_block=True,
name="private_mean",
reason="To get the private_mean",
timeout_secs=10,
)
print("Private Mean: ", private_mean(0.8))
Count = duet.pydp.algorithms.laplacian.Count
carrots_eaten_limit_ptr = duet.store["carrots_eaten_limit"]
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float) -> int:
    """Differentially-private count over the pre-filtered 'above limit' column."""
    x = Count(privacy_budget, dtype="int")
    return x.quick_result(carrots_eaten_limit_ptr).get(
        request_block=True,
        name="private_count_above",
        reason="To get the private_count_above",
        timeout_secs=10,
    )
print("private count above:\t" + str(private_count_above(1)))
Max = duet.pydp.algorithms.laplacian.Max
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
    x = Max(privacy_budget, 0, 100, dtype="int")
    return x.quick_result(carrots_eaten_ptr).get(
        request_block=True,
        name="private_max",
        reason="To get the private_max",
        timeout_secs=10,
    )
print("private max:\t" + str(private_max(1)))
BoundedSum = duet.pydp.algorithms.laplacian.BoundedSum
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
    """Differentially-private sum of carrots eaten (values clamped to [1, 100]).

    NOTE(review): dtype="float" but the annotation says int — confirm which
    is intended; the annotation is kept for interface compatibility.
    """
    x = BoundedSum(privacy_budget, 1, 100, dtype="float")
    # Fix: the request metadata previously said "private_count_above"
    # (copy-paste slip); it now correctly identifies this request as a sum.
    return x.quick_result(carrots_eaten_ptr).get(
        request_block=True,
        name="private_sum",
        reason="To get the private_sum",
        timeout_secs=10,
    )
print("Private Sum:\t" + str(private_sum(1)))
```
| github_jupyter |
```
import pandas as pd
# movies dataset
movies = pd.read_pickle('./dataset/movies/movies.p')
print(movies.shape)
movies.head()
# taglines dataset (shares the 'id' key with movies)
taglines = pd.read_pickle('./dataset/movies/taglines.p')
print(taglines.shape)
taglines.head()
```
## Filter joins
- semi join
- anti join
Mutation join vs filter join
- a mutating join combines data from two tables based on matching observations in both tables
- a filtering join filters observations from one table based on whether or not they match an observation in another table
### 1. semi joins
- return the intersection, similar to an inner join
- returns only columns from the left table and **not** the right
- No duplicates
<img src='./media/semi_join.png' width=700 height=800>
- step 1 --> simple inner join for semi join
- step 2 --> making a filter of semi join
- step 3 --> filtering data
```
# step 1 --> simple inner join for semi join
movies_tag = movies.merge(taglines, on='id')
movies_tag.head()
# step 2 --> making a filter of semi join: boolean mask of movie ids
# that appear in the inner-join result
movies['id'].isin(movies_tag['id'])
# step 3 --> filtering data
tagged_movies = movies[movies['id'].isin(movies_tag['id'])]
tagged_movies.head()
# semi join in one place: keep only movies that have a tagline
movies_tag = movies.merge(taglines, on='id')
tagged_movies = movies[movies['id'].isin(movies_tag['id'])]
tagged_movies.head()
```
### 2. anti join
- opposite to semi join
- return the left table, **excluding the intersection**
- returns only columns from the left table, **not** from the right
<img src='./media/anti join.png' width= 700 height=800>
- step 1 --> simple left join for anti join
- step 2 --> making a filter of anti join
```
# step 1 --> left join with indicator=True adds a '_merge' column telling
# whether each row matched ('both') or came only from the left ('left_only')
movies_tag = movies.merge(taglines, on='id', how='left', indicator=True)
print(movies_tag.shape)
movies_tag.head()
# step 2 --> making a filter for anti join: ids present only in the left table
id_list = movies_tag.loc[movies_tag['_merge']=='left_only', 'id']
pd.DataFrame(id_list).head()
# step 3 --> applying filter: movies without a tagline
movies_tag = movies.merge(taglines, on='id', how='left', indicator=True)
id_list = movies_tag.loc[movies_tag['_merge']=='left_only', 'id']
non_tagged_movies = movies_tag[movies_tag['id'].isin(id_list)]
non_tagged_movies.head()
```
## Concatenate DataFrames together vertically
- pandas **.concat()** can concatenate both vertically and horizontally
- **axis=0** for vertical
<img src='./media/verticaal_concatenation.png' width= 400 height= 500>
```
# Three small row-slices of movies used to demonstrate concatenation.
jan_movies = movies.iloc[1:5]
jan_movies
feb_movies = movies.iloc[11:15]
feb_movies
march_movies = movies.iloc[21:25]
march_movies
# basic concatenation (original row indexes are preserved)
pd.concat([jan_movies,feb_movies,march_movies])
# Ignoring the index: rows are relabelled 0..n-1
pd.concat([jan_movies,feb_movies,march_movies], ignore_index=True)
# Setting labels to original tables (creates a MultiIndex level)
pd.concat([jan_movies,feb_movies,march_movies], ignore_index=False, keys=['jan', 'feb', 'mar'])
jan_tags = taglines.iloc[1:5]
jan_tags
# Concatenate tables with different column names
pd.concat([jan_movies,jan_tags], sort=True) #<-- sorting column names
pd.concat([jan_movies,jan_tags], sort=False) #<-- without sorting column names, by default False
# Concatenate tables with different column names, keeping only shared columns
pd.concat([jan_movies, jan_tags],join='inner')#<-- applying inner join on columns, by default outer
```
### Using append method
**.append()**
- Simplified version of **.concat()**
- supports: **ignore_index** and **sort**
- does not support: **keys** and **join**, i.e. always **join == 'outer'**
```
# NOTE(review): DataFrame.append was removed in pandas 2.0 — on modern pandas
# use pd.concat([jan_movies, feb_movies, march_movies], ignore_index=True,
# sort=True) instead.
jan_movies.append([feb_movies,march_movies], ignore_index=True, sort=True)
```
## Verifying integrity
<img src= './media/verfying_integrity.png'>
## Validating merges
**.merge(validate=None)**
- check if merge is not specified type
- 'one to one'
- 'one to many'
- 'many to one'
- 'many to many'
```
# lets check it on movies and taglines
print(movies.merge(taglines , on='id', validate='one_to_one').shape)
movies.merge(taglines , on='id', validate='one_to_one').head()
```
If the relationship is not one-to-one, we'll get the error below:
**Traceback (most recent call last):<br>
MergeError: Merge keys are not unique in right dataset; not a one-to-one merge**
## Verifying concatenations
**.concat(verify_integrity=False)** :
- Check whether the new concatenated index contains duplicates
- Default value is **False**
```
# verify_integrity=False (the default) allows duplicate index values.
pd.concat([jan_movies,feb_movies], verify_integrity=False)
# These two slices overlap at row index 4, so the concatenated index
# contains a duplicate.
duplicate_jan_movies = movies.iloc[1:5]
duplicate_feb_movies = movies.iloc[4:5]
pd.concat([duplicate_jan_movies,duplicate_feb_movies], verify_integrity=False)
#<-- Gives an error because verify_integrity=True checks for duplicate index values
pd.concat([duplicate_jan_movies,duplicate_feb_movies], verify_integrity=True)
```
# Practice
### Task1
#### Required datasets
```
# Exercise tables: employees and top customers, related through the
# sales-rep id column 'srid'.
employees = pd.read_csv('./employees.csv')
employees.head()
top_cust = pd.read_csv('./top_cust.csv')
top_cust.head()
```
#### requirements
- Merge employees and top_cust with a left join, setting indicator argument to True. Save the result to empl_cust.
- Select the srid column of empl_cust and the rows where _merge is 'left_only'. Save the result to srid_list.
- Subset the employees table and select those rows where the srid is in the variable srid_list and print the results.
```
# Anti join: find employees that do NOT serve any top customer.
# Merge employees and top_cust
empl_cust = employees.merge(top_cust, on='srid',
                            how='left', indicator=True)
# Select the srid column where _merge is left_only
srid_list = empl_cust.loc[empl_cust['_merge'] == 'left_only', 'srid']
# Get employees not working with top customers
employees[employees['srid'].isin(srid_list)]
```
### Task2
#### The required datasets
```
# Exercise tables: non-musical tracks, top invoices and genres.
non_mus_tcks = pd.read_csv('./non_musk_tcks.csv')
non_mus_tcks.head()
top_invoices = pd.read_csv('./top_invoices.csv')
top_invoices.head()
genres = pd.read_csv('./genres.csv')
genres.head()
```
#### The required tasks
- Merge non_mus_tcks and top_invoices on tid using an inner join. Save the result as tracks_invoices.
- Use .isin() to subset the rows of non_mus_tck where tid is in the tid column of tracks_invoices. Save the result as top_tracks.
- Group top_tracks by gid and count the tid rows. Save the result to cnt_by_gid.
- Merge cnt_by_gid with the genres table on gid and print the result.
```
# Inspect dtypes/null counts before cleaning the 'tid' key column.
non_mus_tcks.info()
top_invoices.info()
def numbers(x):
    """Extract the digit characters of *x* as a string.

    Non-digit characters are dropped (e.g. "id_42a" -> "42"); an input with
    no digits yields "".  On any failure returns 0 — kept for backward
    compatibility, even though it mixes int/str return types.
    """
    try:
        x = str(x)
        return "".join([i for i in x if str.isnumeric(i)])
    except Exception:  # narrowed from a bare except: don't swallow SystemExit etc.
        return 0
# Preview the cleaned key, then coerce 'tid' to int64 so it can be merged.
non_mus_tcks.tid.apply(numbers).head()
import numpy as np
non_mus_tcks['tid'] = non_mus_tcks['tid'].apply(numbers)
non_mus_tcks['tid'] = non_mus_tcks['tid'].apply(np.int64)
# Merge the non_mus_tck and top_invoices tables on tid
tracks_invoices = non_mus_tcks.merge(top_invoices, on='tid')
# Use .isin() to subset non_mus_tcsk to rows with tid in tracks_invoices
top_tracks = non_mus_tcks[non_mus_tcks['tid'].isin(tracks_invoices['tid'])]
# Group the top_tracks by gid and count the tid rows
cnt_by_gid = top_tracks.groupby(['gid'], as_index=False).agg({'tid':'count'})
# Merge the genres table to cnt_by_gid on gid and print
cnt_by_gid.merge(genres, on='gid')
```
### Task3
#### required datasets
```
# Track listings from three albums, to be concatenated below.
tracks_master = pd.read_csv('./tracks_master.csv')
tracks_master.head()
tracks_ride = pd.read_csv('./tracks_ride.csv')
tracks_ride.head()
tracks_st = pd.read_csv('./tracks_st.csv')
tracks_st.head()
```
#### required tasks
- Concatenate tracks_master, tracks_ride, and tracks_st, in that order, setting sort to True.
- Concatenate tracks_master, tracks_ride, and tracks_st, where the index goes from 0 to n-1.
- Concatenate tracks_master, tracks_ride, and tracks_st, showing only columns that are in all tables.
```
# Concatenate the tracks (original indexes kept, column names sorted)
tracks_from_albums = pd.concat([tracks_master,tracks_ride,tracks_st],
                               sort=True)
tracks_from_albums.head()
# Concatenate the tracks so the index goes from 0 to n-1
tracks_from_albums = pd.concat([tracks_master, tracks_ride, tracks_st],
                               ignore_index = True,
                               sort=True)
tracks_from_albums.head()
# Concatenate the tracks, show only column names that are in all tables
tracks_from_albums = pd.concat([tracks_master, tracks_ride, tracks_st],join= 'inner', sort=True)
tracks_from_albums.head()
```
### Task4
#### required datasets
```
# Monthly invoice tables for July, August and September.
inv_jul = pd.read_csv('./inv_jul.csv')
inv_jul.head()
inv_aug = pd.read_csv('./inv_aug.csv')
inv_aug.head()
inv_sep = pd.read_csv('./inv_sep.csv')
inv_sep.head()
```
- Concatenate the three tables together vertically in order with the oldest month first, adding '7Jul', '8Aug', and '9Sep' as keys for their respective months, and save to variable avg_inv_by_month.
- Use the .agg() method to find the average of the total column from the grouped invoices.
- Create a bar chart of avg_inv_by_month.
```
# Concatenate the tables and add keys
inv_jul_thr_sep = pd.concat([inv_jul, inv_aug, inv_sep],
                            keys=['7Jul', '8Aug', '9Sep'])
inv_jul_thr_sep
# Clean the 'total' column (keep digit characters), then cast to int64.
# inv_jul_thr_sep['total']=inv_jul_thr_sep['total'].astype(float)
inv_jul_thr_sep['total'] = inv_jul_thr_sep['total'].apply(numbers)
inv_jul_thr_sep['total'] = inv_jul_thr_sep['total'].apply(np.int64)
# Group the invoices by the index keys and find avg of the total column
avg_inv_by_month = inv_jul_thr_sep.groupby(level=0).agg({'total':'mean'})
# Bar plot of avg_inv_by_month
import matplotlib.pyplot as plt  # fix: plt was never imported in this notebook
avg_inv_by_month.plot(kind='bar')
plt.show()
```
### Task5
#### Required tables
```
# artists and albums are related through the 'artid' key column.
artists = pd.read_csv('./artist.csv')
artists.head()
albums = pd.read_csv('./album.csv')
albums.head()
```
- You have been given 2 tables, artists, and albums. Use the console to merge them using artists.merge(albums, on='artid').head(). Adjust the validate argument to answer which statement is False.
1- You can use 'many_to_many' without an error, since there is a duplicate key in one of the tables.
2- You can use 'one_to_many' without error, since there is a duplicate key in the right table.
3- You can use 'many_to_one' without an error, since there is a duplicate key in the left table.
```
# Try different validate= settings to discover the relationship; per the
# exercise, 'artid' presumably repeats in albums (one artist, many albums),
# so 'one_to_one' would raise MergeError — verify by uncommenting.
# artists.merge(albums, on='artid').head()
# artists.merge(albums, on='artid', validate = 'one_to_many').head()
```
### Task6
#### required file
```
# Chart tables: classic and pop charts for 2018 and 2019.
classic_18 = pd.read_csv('./classic_18.csv')
classic_18.head()
classic_19 = pd.read_csv('./classic_19.csv')
classic_19.head()
pop_18 = pd.read_csv('./pop_18.csv')
pop_18.head()
pop_19 = pd.read_csv('./pop_19.csv')
pop_19.head()
```
- Concatenate the classic_18 and classic_19 tables vertically where the index goes from 0 to n-1, and save to classic_18_19.
- Concatenate the pop_18 and pop_19 tables vertically where the index goes from 0 to n-1, and save to pop_18_19.
- With classic_18_19 on the left, merge it with pop_18_19 on tid using an inner join.
- Use .isin() to filter classic_18_19 where tid is in classic_pop.
```
# Semi join: classic songs that also appear in the pop charts.
# Concatenate the classic tables vertically
classic_18_19 = pd.concat([classic_18, classic_19], ignore_index=True)
# Concatenate the pop tables vertically
pop_18_19 = pd.concat([pop_18, pop_19], ignore_index=True)
# Merge classic_18_19 with pop_18_19
classic_pop = classic_18_19.merge(pop_18_19, on='tid')
# Using .isin(), filter classic_18_19 rows where tid is in classic_pop
popular_classic = classic_18_19[classic_18_19['tid'].isin(classic_pop['tid'])]
# Print popular chart
print(popular_classic)
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA
```
### Generate a dataset
```
# Draw 1000 samples from a correlated 2-D Gaussian (covariance 7 between
# the two coordinates) and scatter-plot them.
xy = np.random.multivariate_normal([0,0], [[10,7],[7,10]],1000)
plt.plot(xy[:,0],xy[:,1],"o")
plt.show()
```
### Create a Principal Component Analysis (PCA) object
What is `n_components`?
```
pca = PCA(n_components=2)
```
`n_components` is the number of axes on which you spread the data out. You can only have as many components as you have axes (2 in this case).
### Fit the axes
What does the following code do?
```
xy_pca = pca.fit(xy)
```
It performs the PCA fit, finding the primary axes of variation.
```
plt.plot(xy[:,0],xy[:,1],"o")
# Draw each principal axis as a segment through the origin, scaled by that
# component's explained variance (red = first component, yellow = second).
scalar = xy_pca.explained_variance_[0]
plt.plot([0,xy_pca.components_[0,0]*scalar/2],[0,xy_pca.components_[0,1]*scalar/2],color="red")
plt.plot([0,-xy_pca.components_[0,0]*scalar/2],[0,-xy_pca.components_[0,1]*scalar/2],color="red")
scalar = xy_pca.explained_variance_[1]
plt.plot([0,xy_pca.components_[1,0]*scalar/2],[0,xy_pca.components_[1,1]*scalar/2],color="yellow")
plt.plot([0,-xy_pca.components_[1,0]*scalar/2],[0,-xy_pca.components_[1,1]*scalar/2],color="yellow")
```
### What does the following do?
```
xy_trans = xy_pca.transform(xy)
```
Transforms `x` and `y` onto the PCA axes.
```
fig, ax = plt.subplots(1,2,figsize=(10,5))
ax[0].plot(xy[:,0],xy[:,1],"o")
ax[0].set_xlabel("x")
ax[0].set_ylabel("y")
ax[0].set_xlim((-15,15)); ax[0].set_ylim((-15,15))
ax[1].plot(xy_trans[:,0],xy_trans[:,1],"o")
ax[1].set_xlabel("PCA1")
ax[1].set_ylabel("PCA2")
ax[1].set_xlim((-15,15)); ax[1].set_ylim((-15,15))
plt.show()
```
### What does the following do?
```
print("Variation explained:")
print("First component: {:.3f}".format(xy_pca.explained_variance_ratio_[0]))
print("Second component: {:.3f}".format(xy_pca.explained_variance_ratio_[1]))
```
Describes how much variation each PCA axis captures.
Informally: if you only included the first component in a predictive model, the $R^{2}$ between your prediction and reality would be 0.85.
### Some helper code, which takes an xy_pair and does all of the steps above.
```
def pca_wrapper(xy_pairs):
    """
    Take an array of x/y data and perform a principal component analysis.

    Plots the raw data next to the PCA-transformed data on identical fixed
    axes and prints the fraction of variance explained by each component.
    Relies on the module-level ``plt`` (matplotlib) and ``PCA`` (sklearn).
    """
    # Left panel: raw data, fixed limits so the two panels are comparable.
    fig, ax = plt.subplots(1,2,figsize=(10,5))
    ax[0].plot(xy_pairs[:,0],xy_pairs[:,1],"o")
    ax[0].set_xlim((-18,18))
    ax[0].set_ylim((-18,18))
    ax[0].set_title("raw x,y data")
    ax[0].set_xlabel("x")
    ax[0].set_ylabel("y")
    # Perform the PCA fit
    pca = PCA(n_components=2)
    z = pca.fit(xy_pairs)
    # Transform the data onto the new PCA axes
    new_xy_pairs = z.transform(xy_pairs)
    # Plot the PCA data
    ax[1].plot(new_xy_pairs[:,0],new_xy_pairs[:,1],"o")
    ax[1].set_title("PCA transformed data")
    ax[1].set_xlim((-18,18))
    ax[1].set_ylim((-18,18))
    ax[1].set_xlabel("PCA1")
    ax[1].set_ylabel("PCA2")
    print("Variation explained:")
    print("First component: {:.3f}".format(pca.explained_variance_ratio_[0]))
    print("Second component: {:.3f}".format(pca.explained_variance_ratio_[1]))
```
### How does fraction variation relate to skew in the data?
```
d1 = np.random.multivariate_normal([0,0], [[10,1],[1,10]],1000)
pca_wrapper(d1)
d2 = np.random.multivariate_normal([0,0], [[10,5],[5,10]],1000)
pca_wrapper(d2)
d3 = np.random.multivariate_normal([0,0], [[10,9],[9,10]],1000)
pca_wrapper(d3)
```
The stronger the covariation between parameters, the more readily the PCA can reduce dimensionality.
### Using PCA to try to classify things
### The "Iris" dataset
<img style="margin:auto" align="center" src="https://www.math.umd.edu/~petersd/666/html/iris_with_labels.jpg" />
+ Three species of iris
+ Four properties measured for many representatives from each species
+ Properties are: sepal length, sepal width, petal length, petal width
### Load in the data
```
iris = datasets.load_iris()
obs = iris.data        # feature matrix (sepal/petal measurements)
species = iris.target  # integer species labels 0, 1, 2
# Standardize each feature (z-score) so no axis dominates the PCA by scale.
mean = obs.mean(axis=0)
std = obs.std(axis=0)
obs = (obs - mean)/std
```
The mean, standard deviation business normalizes the data so the values are all on the same scale.
```
def plot_slice(obs_r,axis_i,axis_j):
    """
    Define a helper function.

    Scatter-plot two columns (axis_i vs axis_j) of the observation array
    ``obs_r``, one colour per iris species. NOTE(review): reads the
    module-level ``species`` label array instead of taking it as a parameter.
    """
    # One colour per species label (0, 1, 2).
    plt.plot(obs_r[species == 0,axis_i],obs_r[species == 0,axis_j],"o",color='navy')
    plt.plot(obs_r[species == 1,axis_i],obs_r[species == 1,axis_j],"o",color='turquoise')
    plt.plot(obs_r[species == 2,axis_i],obs_r[species == 2,axis_j],"o",color='darkorange')
    plt.xlabel(axis_i)
    plt.ylabel(axis_j)
    plt.show()
```
### Species separate on some axes, but not all axes
```
plot_slice(obs,axis_i=0,axis_j=1)
```
### Do PCA
```
# Fit PCA on all four standardized iris features, then project the data
# onto the principal axes.
pca = PCA(n_components=4)
obs_pca = pca.fit(obs)
obs_trans = obs_pca.transform(obs)
```
### What is different about PCA axes?
```
plot_slice(obs_trans,axis_i=0,axis_j=1)
```
All of that separating power is jammed into the first axis.
### Quantify this with the explained variance ratio:
```
for r in obs_pca.explained_variance_ratio_:
print("{:.3f}".format(r))
```
### Summary
+ PCA is a way to spread data out on "natural" axes
+ Clusters in PCA space can be used to classify things
+ Axes may be hard to interpret directly
| github_jupyter |
```
%matplotlib inline
import numpy as np
import pylab as plt
import ccgpack as ccg
from itertools import product
from matplotlib.colors import LogNorm
# Load the lensed Planck angular power spectrum and simulate four Gaussian
# random fields from it.
cl = np.load('../data/cl_planck_lensed.npy')
sfs = ccg.StochasticFieldSimulator(cl)
nside = 1024  # map resolution (pixels per side)
size = 30     # patch size -- presumably degrees; TODO confirm against ccgpack docs
ms = []
for i in range(4):
    ms.append(sfs.simulate(nside,size))
fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(ncols=2
,nrows=2,figsize=(6 ,6))
ax1.imshow(ms[0])
ax2.imshow(ms[1])
ax3.imshow(ms[2])
ax4.imshow(ms[3])
# ll0 = cl[:600,0]
# dl0 = cl[:600,1]*(ll0[:600]*(ll0[:600]+1)/(2*np.pi))
# ll,p1 = ccg.power_spectrum(ms[0],size=15)
# plt.plot(ll0,dl0,'k--')
# plt.plot(ll[:600],p1[:600],'b')
# plt.xscale('log')
# plt.yscale('log')
# plt.xlim(2,600)
# # plt.ylim(5e-8,5e4)
# Two-point statistics of the first map. NOTE(review): "correlarion_fucntion"
# appears to be the (misspelled) name exported by ccgpack -- verify.
cor,ecor = ccg.correlarion_fucntion(ms[0],n_p=1e6)
plt.plot(cor)
ksi = ccg.ppcf(ms[0],2,1e6,700)
plt.plot(ksi)
def N1(d, num=100, gt=True):
    """Cumulative fraction of ``d`` beyond a grid of thresholds.

    Returns ``(nu, n1)`` where ``nu`` is ``num`` evenly spaced thresholds
    spanning the data range and ``n1[k]`` is the fraction of entries of
    ``d`` strictly greater than ``nu[k]`` (``gt=True``) or strictly less
    than ``nu[k]`` (``gt=False``).
    """
    nu = np.linspace(d.min(), d.max(), num)
    compare = (lambda t: d > t) if gt else (lambda t: d < t)
    n1 = np.array([np.mean(compare(t)) for t in nu])
    return nu, n1
def exterma(arr, peak=True):
    """Flag strict local extrema of an n-dimensional array.

    Parameters
    ----------
    arr : np.ndarray
        Input field of any dimensionality.
    peak : bool
        If True mark strict local maxima, otherwise strict local minima.

    Returns
    -------
    np.ndarray of int8
        Mask with 1 where ``arr`` is strictly greater (or smaller) than all
        of its neighbours, 0 elsewhere. Neighbourhoods are built with
        ``np.roll``, so comparisons wrap around the array edges -- border
        cells are compared against the opposite edge.
    """
    dim = arr.ndim  # number of dimensions
    offsets = [0, -1, 1]  # 0 first so the identity shift is generated first
    filt = np.ones(arr.shape, dtype=np.int8)
    for shift in product(offsets, repeat=dim):
        # Skip the all-zero shift: comparing the array with itself would
        # zero the mask everywhere (arr > arr is never true).
        if np.all(np.array(shift) == 0):
            continue
        rolled = np.roll(arr, shift, np.arange(dim))
        if peak:
            filt = filt * (arr > rolled)
        else:
            filt = filt * (arr < rolled)
    return filt
# Standardize the first simulated map to zero mean / unit variance.
ms[0] = ms[0]-ms[0].mean()
ms[0] = ms[0]/ms[0].std()
nu,n1_gt = N1(ms[0],num=100,gt=True)
plt.plot(nu,n1_gt)
# NOTE(review): the map is standardized a second time here; after the first
# pass this is effectively a no-op.
ms[0] = ms[0]-ms[0].mean()
ms[0] = ms[0]/ms[0].std()
nu,n1_lt = N1(ms[0],num=100,gt=False)
plt.plot(nu,n1_lt)
# Differentiate the cumulative fractions to approximate one-point densities.
plt.plot(nu[:-1],np.diff(n1_gt))
plt.plot(nu[:-1],np.diff(n1_lt))
# Keep only local maxima above threshold th, then correlate the peak catalogue
# against a uniform random catalogue (5 randoms per peak).
th = 0
mcopy = ms[0]+0
peaks = exterma(mcopy ,peak=True)
mcopy[np.logical_not(peaks.astype(bool))] = 0
mcopy[mcopy<th] = 0
nf1 = np.argwhere(mcopy).T
nnn = 5*nf1.shape[1]
rlist = np.random.randint(0,1024,(2,nnn))
ksi1 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)
# plt.plot(ksi1)
fig,(ax1,ax2) = plt.subplots(1,2,figsize=(16,8))
ax1.imshow(mcopy,cmap='gray')
rimg = np.zeros(mcopy.shape)
rows, cols = zip(*rlist.T)
rimg[rows, cols] = 1
ax2.imshow(rimg,cmap='gray')
# Build a binary mask with four rectangular holes to mimic masked sky regions.
mask = np.zeros(ms[0].shape)+1
mask[700:1000,100:400] = 0
mask[100:300,700:900] = 0
mask[100:300,200:400] = 0
mask[700:800,700:890] = 0
# Repeat the peak extraction on the masked map; the random catalogue is also
# restricted to the unmasked area before correlating.
mcopy = ms[0]*mask+0
peaks = exterma(mcopy ,peak=True)
mcopy[np.logical_not(peaks.astype(bool))] = 0
mcopy[mcopy<th] = 0
nf1 = np.argwhere(mcopy).T
nnn = 5*nf1.shape[1]
rlist = np.random.randint(0,1024,(nnn,2))
rimg = np.zeros(mcopy.shape)
rows, cols = zip(*rlist)
rimg[rows, cols] = 1
rimg = rimg*mask
rlist = np.argwhere(rimg).T
ksi2 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)
fig,(ax1,ax2) = plt.subplots(1,2,figsize=(16,8))
ax1.imshow(mcopy,cmap='gray')
ax2.imshow(rimg,cmap='gray')
# Same masked peak catalogue but an unmasked random catalogue, for comparison.
rlist = np.random.randint(0,1024,(2,nnn))
ksi3 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)
# NOTE(review): eval_ksi is defined further down in this cell, so a fresh
# top-to-bottom run raises NameError here unless a previous execution
# already defined it.
ksi4 = eval_ksi(ms[0],mask,thresholds=[0],peak=True)[0]
# plt.plot(ksi)
plt.plot(ksi1,'r',label='normal')
plt.plot(ksi2,'b',label='both_masked')
plt.plot(ksi4,'k',label='func')
plt.plot(ksi3,'g',label='peak_masked')
plt.legend()
plt.savefig('tpcf.jpg',dpi=150)
# thresholds = [-2,-1,0,1,2]
# ksis = eval_ksi(ms[0],mask,thresholds,peak=True,rmax=700,crand=5)
# NOTE(review): `ksis` and `thresholds` are only assigned in the
# commented-out lines above, so this loop NameErrors on a fresh run.
for i in range(len(ksis)):
    plt.plot(ksis[i],label=str(thresholds[i]))
plt.xlim(-1,100)
plt.legend()
thresholds = [-2,-1,0,1,2]
ksis = eval_ksi(ms[0],mask,thresholds,peak=False,rmax=700,crand=5)
for i in range(len(ksis)):
    plt.plot(ksis[i],label=str(thresholds[i]))
plt.xlim(-1,100)
plt.legend()
def eval_ksi(m, mask, thresholds, peak=True, rmax=700, crand=5):
    """Two-point correlation of masked-map extrema at several thresholds.

    For each threshold, the local extrema of ``m * mask`` are kept,
    thresholded, and correlated with ``ccg.ffcf_no_random`` against a
    random catalogue of ``crand`` points per extremum drawn inside the
    same mask. Returns one correlation array per threshold.
    """
    masked = m * mask
    nside = masked.shape[0]
    # Zero out everything that is not a local extremum of the masked map.
    extrema = exterma(masked, peak=peak).astype(bool)
    masked[~extrema] = 0
    results = []
    for th in thresholds:
        field = masked + 0  # fresh copy per threshold
        if peak:
            field[field < th] = 0
        else:
            field[field > th] = 0
        points = np.argwhere(field).T
        n_random = crand * points.shape[1]
        draws = np.random.randint(0, nside, (n_random, 2))
        rand_img = np.zeros(field.shape)
        rr, cc = zip(*draws)
        rand_img[rr, cc] = 1
        rand_img = rand_img * mask  # reject randoms falling in masked regions
        rand_points = np.argwhere(rand_img).T
        results.append(ccg.ffcf_no_random(fl1=points, fl2=points, rlist=rand_points, rmax=rmax))
    return results
# def bias(m,ths,kmin,kmax):
# if not isinstance(ths, list):
# ths = [ths]
# bs = []
# for th in ths:
# ksi = ccg.ppcf(m,th,1e6,700)
# biask = np.sqrt(np.absolute(ksi[:700]/cor[:700]))
# bs.append(np.mean(biask[kmin:kmax]))
# return bs
# ths = [0.5,1.0,1.5,2.0,2.5]
# kmin = 10
# kmax = 50
# bs = bias(ms[0],ths,kmin,kmax)
# bsth = np.array(ths)
# plt.plot(ths,bsth,'r--')
# plt.plot(ths,bs,'bo')
# plt.xlabel(r'$\nu$',fontsize=15)
# plt.ylabel(r'$b(\nu)$',fontsize=15)
```
| github_jupyter |
# Quantum chemistry with VQE
This tutorial will show you how to solve an important problem for quantum chemistry using PennyLane on Amazon Braket: finding the ground-state energy of a molecule. The problem can be tackled using near-term quantum hardware by implementing the variational quantum eigensolver (VQE) algorithm. You can find further details on quantum chemistry and VQE in both the [Braket VQE](../hybrid_quantum_algorithms/VQE_Chemistry/VQE_chemistry_braket.ipynb) notebook and PennyLane [tutorials](https://pennylane.ai/qml/demos_basics.html).
## From quantum chemistry to quantum circuits
Our first step is to convert our chemistry problem into something that can be tackled with a quantum computer. To do this, we will use PennyLane's ``qchem`` package. If running on a local machine, the ``qchem`` package must be installed separately by following [these](https://pennylane.readthedocs.io/en/stable/introduction/chemistry.html) instructions.
```
import pennylane as qml
from pennylane import qchem
from pennylane import numpy as np
```
The input chemistry data is often provided in the form of a geometry file containing details about the molecule. Here, we consider the hydrogen molecule $\mathrm{H}_2$ whose atomic structure is stored in the [h2.xyz](./qchem/h2.xyz) file. The qubit Hamiltonian for $\mathrm{H}_2$ is built using the ``qchem`` package.
```
h, qubits = qchem.molecular_hamiltonian(name="h2", geo_file="qchem/h2.xyz")
print(h)
```
In the VQE algorithm, we compute the energy of the $\mathrm{H}_2$ molecule by measuring the expectation value of the above Hamiltonian on a variational quantum circuit. Our objective is to train the parameters of the circuit so that the expectation value of the Hamiltonian is minimized, thereby finding the ground state energy of the molecule.
In this tutorial, we also want to compute the total spin. To that end, we use the ``qchem`` package to build the total-spin operator $S^2$:
```
electrons = 2 # Molecular hydrogen has two electrons
S2 = qchem.spin2(electrons, qubits)
print(S2)
```
## Grouping observables to reduce circuit executions
Suppose we want to measure the expectation value of the electronic Hamiltonian ``h``. This Hamiltonian is composed of 15 individual observables that are tensor products of Pauli operators:
```
print("Number of Pauli terms in h:", len(h.ops))
```
A straightforward approach to measuring the expectation value would be to implement the circuit 15 times, and each time measuring one of the Pauli terms that form part of the Hamiltonian ``h``. However, we could be more efficient. The Pauli terms can be separated into groups (see PennyLane's [grouping](https://pennylane.readthedocs.io/en/stable/code/qml_grouping.html) module) that can be measured concurrently on a single circuit. Elements of each group are known as qubit-wise commuting observables. The Hamiltonian ``h`` can be split into 5 groups:
```
groups, coeffs = qml.grouping.group_observables(h.ops, h.coeffs)
print("Number of qubit-wise commuting groups:", len(groups))
```
Practically, this means that instead of executing 15 separate circuits, we just need to execute 5. This saving can become even more pronounced as the number of Pauli terms in the Hamiltonian increases. For example, switching to a larger molecule or a different chemical basis set can increase both the number of qubits and the number of terms.
Fortunately, the PennyLane/Braket pipeline has builtin support for pre-grouping the observables in a Hamiltonian to minimize the number of device executions, saving both runtime and simulation fees when using remote devices. Optimized observable grouping will be used in the rest of this tutorial.

## Defining an ansatz circuit
We now set up the ansatz circuit that will be trained to prepare the ground state of the Hamiltonian. Our first step is to load the local Braket device:
```
dev = qml.device("braket.local.qubit", wires=qubits)
```
This tutorial uses a chemically-inspired circuit called the Unitary Coupled-Cluster Singles and Doubles ([UCCSD](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.subroutines.UCCSD.html)) ansatz. To use this, we must define some additional inputs from quantum chemistry.
```
ref_state = qchem.hf_state(electrons, qubits) # Hartree-Fock state
excitations = qchem.excitations(electrons, qubits) # generate single- and double-excitations
s_wires, d_wires = qchem.excitations_to_wires(*excitations) # map excitations to the wires that the UCCSD circuit will act on
```
<div class="alert alert-block alert-info">
<b>Note</b> A variety of alternative ansätze and templates are <a href="https://pennylane.readthedocs.io/en/latest/code/qml_templates.html#module-pennylane.templates.layers">available</a> and different choices will result in varying circuit depth and number of trainable parameters.
</div>
Our ansatz circuit is then simple to define:
```
def circuit(params, wires):
    """UCCSD ansatz circuit; reads ref_state / s_wires / d_wires from module scope."""
    qml.templates.UCCSD(params, init_state=ref_state, s_wires=s_wires, d_wires=d_wires, wires=wires)
```
Note that an output measurement has not yet been defined! This is the next step.
## Measuring the energy and total spin
We discussed earlier that we want to minimize the expectation value of the qubit Hamiltonian, corresponding to the energy of $\mathrm{H}_2$. The expectation values of this Hamiltonian and the total spin $\hat{S}^2$ operator can be defined using:
```
energy_expval = qml.ExpvalCost(circuit, h, dev, optimize=True)
S2_expval = qml.ExpvalCost(circuit, S2, dev, optimize=True)
```
Notice the ``optimize=True`` option. This instructs PennyLane and Braket to break up each Hamiltonian into qubit-wise commuting groups for increased device-execution efficiency.
Let's now initialize some random values and evaluate the energy and spin. The total spin $S$ of the prepared state can be obtained from the expectation value $\langle \hat{S}^2 \rangle$ using $S=-\frac{1}{2} + \sqrt{\frac{1}{4} + \langle \hat{S}^2 \rangle}$. We can define a function to compute $S$:
```
def spin(params):
    """Total spin S recovered from <S^2> via S = -1/2 + sqrt(1/4 + <S^2>)."""
    return -0.5 + np.sqrt(1 / 4 + S2_expval(params))
np.random.seed(1967)
params = np.random.normal(0, np.pi, len(s_wires) + len(d_wires))
```
The energy and total spin are then
```
print("Energy:", energy_expval(params))
print("Spin: ", spin(params))
```
Since we have picked random parameters, the measured energy does not correspond to the ground state energy and the prepared state is not an eigenstate of the total-spin operator. We must now train the parameters to find the minimum energy.
## Minimizing the energy
The energy can be minimized by choosing an optimizer and running the standard optimization loop:
```
opt = qml.GradientDescentOptimizer(stepsize=0.4)
iterations = 40
energies = []
spins = []
for i in range(iterations):
    # One gradient-descent step on the energy expectation value.
    params = opt.step(energy_expval, params)
    e = energy_expval(params)
    s = spin(params)
    energies.append(e)
    spins.append(s)
    # Report progress every 5 iterations.
    if (i + 1) % 5 == 0:
        print(f"Completed iteration {i + 1}")
        print("Energy:", e)
        print("Total spin:", s)
        print("----------------")
print(f"Optimized energy: {e} Ha")
print(f"Corresponding total spin: {s}")
```
The exact value for the ground state energy of molecular hydrogen has been theoretically calculated as ``-1.136189454088`` Hartrees (Ha). Notice that the optimized energy is off by less than a thousandth of a Hartree. Furthermore, the optimized state is an eigenstate of the total-spin operator with eigenvalue $S=0$ as expected for the ground state of the $\mathrm{H}_2$ molecule. Hence, our above results look very promising! We would get even closer to the theory values if we increase the number of iterations.
Let's visualize how the two quantities evolved during optimization:
```
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
theory_energy = -1.136189454088
theory_spin = 0
plt.hlines(theory_energy, 0, 39, linestyles="dashed", colors="black")
plt.plot(energies)
plt.xlabel("Steps")
plt.ylabel("Energy")
axs = plt.gca()
inset = inset_axes(axs, width="50%", height="50%", borderpad=1)
inset.hlines(theory_spin, 0, 39, linestyles="dashed", colors="black")
inset.plot(spins, "r")
inset.set_xlabel("Steps")
inset.set_ylabel("Total spin");
```
We have learned how to efficiently find the ground state energy of a molecule using the PennyLane/Braket pipeline!
<div class="alert alert-block alert-info">
<b>What's next?</b> The <code>qchem</code> folder contains additional molecular structure files for different atomic separations of molecular hydrogen. Pick one of the separations and find the ground state energy. How does the ground state energy change with atomic separation?
</div>
| github_jupyter |
# Counterfactual explanations with ordinally encoded categorical variables
This example notebook illustrates how to obtain [counterfactual explanations](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html) for instances with a mixture of ordinally encoded categorical and numerical variables. A more elaborate notebook highlighting additional functionality can be found [here](./cfproto_cat_adult_ohe.ipynb). We generate counterfactuals for instances in the *adult* dataset where we predict whether a person's income is above or below $50k.
```
import tensorflow as tf
tf.get_logger().setLevel(40) # suppress deprecation messages
tf.compat.v1.disable_v2_behavior() # disable TF2 behaviour as alibi code still relies on TF1 constructs
from tensorflow.keras.layers import Dense, Input, Embedding, Concatenate, Reshape, Dropout, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.preprocessing import OneHotEncoder
from time import time
from alibi.datasets import fetch_adult
from alibi.explainers import CounterfactualProto
print('TF version: ', tf.__version__)
print('Eager execution enabled: ', tf.executing_eagerly()) # False
```
## Load adult dataset
The `fetch_adult` function returns a `Bunch` object containing the features, the targets, the feature names and a mapping of the categories in each categorical variable.
```
adult = fetch_adult()
data = adult.data
target = adult.target
feature_names = adult.feature_names
category_map_tmp = adult.category_map
target_names = adult.target_names
```
Define shuffled training and test set:
```
def set_seed(s=0):
    """Seed both the NumPy and TensorFlow RNGs for reproducible runs."""
    np.random.seed(s)
    tf.random.set_seed(s)
set_seed()
# Shuffle features and target together, then re-split the columns.
data_perm = np.random.permutation(np.c_[data, target])
X = data_perm[:,:-1]
y = data_perm[:,-1]
idx = 30000
# NOTE(review): the test split starts at idx+1, so the row at position idx
# is silently dropped from both train and test -- likely meant y[idx:].
y_train, y_test = y[:idx], y[idx+1:]
```
Reorganize data so categorical features come first:
```
X = np.c_[X[:, 1:8], X[:, 11], X[:, 0], X[:, 8:11]]
```
Adjust `feature_names` and `category_map` as well:
```
feature_names = feature_names[1:8] + feature_names[11:12] + feature_names[0:1] + feature_names[8:11]
print(feature_names)
category_map = {}
for i, (_, v) in enumerate(category_map_tmp.items()):
category_map[i] = v
```
Create a dictionary with as keys the categorical columns and values the number of categories for each variable in the dataset. This dictionary will later be used in the counterfactual explanation.
```
cat_vars_ord = {}
n_categories = len(list(category_map.keys()))
# Map each categorical column index to its number of distinct categories.
for i in range(n_categories):
    cat_vars_ord[i] = len(np.unique(X[:, i]))
print(cat_vars_ord)
```
## Preprocess data
Scale numerical features between -1 and 1:
```
X_num = X[:, -4:].astype(np.float32, copy=False)
xmin, xmax = X_num.min(axis=0), X_num.max(axis=0)
rng = (-1., 1.)
# Min-max scale the four numerical columns into [-1, 1].
X_num_scaled = (X_num - xmin) / (xmax - xmin) * (rng[1] - rng[0]) + rng[0]
X_num_scaled_train = X_num_scaled[:idx, :]
# NOTE(review): idx+1 drops row idx here too, matching the earlier y split.
X_num_scaled_test = X_num_scaled[idx+1:, :]
```
Combine numerical and categorical data:
```
X = np.c_[X[:, :-4], X_num_scaled].astype(np.float32, copy=False)
X_train, X_test = X[:idx, :], X[idx+1:, :]
print(X_train.shape, X_test.shape)
```
## Train a neural net
The neural net will use entity embeddings for the categorical variables.
```
def nn_ord():
    """Build and compile the entity-embedding classifier for the adult data.

    The first len(cat_vars_ord) input columns are ordinally encoded
    categoricals, each routed through its own Embedding; the last 4 columns
    are the scaled numerical features. Output is a 2-way softmax.
    """
    x_in = Input(shape=(12,))
    layers_in = []
    # embedding layers
    for i, (_, v) in enumerate(cat_vars_ord.items()):
        # Bind the loop variable as a default argument: a plain closure over
        # ``i`` is late-bound, so every Lambda would slice the same (last)
        # column if the functions were ever re-evaluated (e.g. on model
        # re-tracing or reload).
        emb_in = Lambda(lambda x, i=i: x[:, i:i+1])(x_in)
        emb_dim = int(max(min(np.ceil(.5 * v), 50), 2))
        emb_layer = Embedding(input_dim=v+1, output_dim=emb_dim, input_length=1)(emb_in)
        emb_layer = Reshape(target_shape=(emb_dim,))(emb_layer)
        layers_in.append(emb_layer)
    # numerical layers
    num_in = Lambda(lambda x: x[:, -4:])(x_in)
    num_layer = Dense(16)(num_in)
    layers_in.append(num_layer)
    # combine embeddings and numerical branch, then a 3-layer MLP head
    x = Concatenate()(layers_in)
    x = Dense(60, activation='relu')(x)
    x = Dropout(.2)(x)
    x = Dense(60, activation='relu')(x)
    x = Dropout(.2)(x)
    x = Dense(60, activation='relu')(x)
    x = Dropout(.2)(x)
    x_out = Dense(2, activation='softmax')(x)
    nn = Model(inputs=x_in, outputs=x_out)
    nn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return nn
set_seed()
nn = nn_ord()
nn.summary()
nn.fit(X_train, to_categorical(y_train), batch_size=128, epochs=30, verbose=0)
```
## Generate counterfactual
Original instance:
```
X = X_test[0].reshape((1,) + X_test[0].shape)
```
Initialize counterfactual parameters:
```
shape = X.shape
# Hyperparameters for CounterfactualProto -- presumably: beta is the L1
# regularisation weight, c_init/c_steps control the prediction-loss weight
# schedule; verify against the alibi documentation.
beta = .01
c_init = 1.
c_steps = 5
max_iterations = 500
rng = (-1., 1.) # scale features between -1 and 1
rng_shape = (1,) + data.shape[1:]
feature_range = ((np.ones(rng_shape) * rng[0]).astype(np.float32),
                 (np.ones(rng_shape) * rng[1]).astype(np.float32))
```
Initialize explainer. Since the `Embedding` layers in `tf.keras` do not let gradients propagate through, we will only make use of the model's predict function, treat it as a black box and perform numerical gradient calculations.
```
set_seed()
# define predict function -- the model is treated as a black box, so the
# explainer falls back to numerical gradients (Embedding layers block
# gradient propagation).
predict_fn = lambda x: nn.predict(x)
cf = CounterfactualProto(predict_fn,
                         shape,
                         beta=beta,
                         cat_vars=cat_vars_ord,
                         max_iterations=max_iterations,
                         feature_range=feature_range,
                         c_init=c_init,
                         c_steps=c_steps,
                         eps=(.01, .01) # perturbation size for numerical gradients
                         )
```
Fit explainer. Please check the [documentation](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html) for more info about the optional arguments.
```
cf.fit(X_train, d_type='abdm', disc_perc=[25, 50, 75]);
```
Explain instance:
```
set_seed()
explanation = cf.explain(X)
```
Helper function to more clearly describe explanations:
```
def describe_instance(X, explanation, eps=1e-2):
    """Pretty-print a counterfactual explanation.

    Shows the original vs counterfactual class and probabilities, then lists
    the categorical features whose category changed and the numerical
    features (last 4 columns) that moved by more than ``eps``. Reads the
    module-level ``target_names``, ``category_map``, ``feature_names`` and
    ``cat_vars_ord``.
    """
    print('Original instance: {} -- proba: {}'.format(target_names[explanation.orig_class],
                                                      explanation.orig_proba[0]))
    print('Counterfactual instance: {} -- proba: {}'.format(target_names[explanation.cf['class']],
                                                            explanation.cf['proba'][0]))
    print('\nCounterfactual perturbations...')
    print('\nCategorical:')
    X_orig_ord = X
    X_cf_ord = explanation.cf['X']
    delta_cat = {}
    # Map ordinal codes back to category names and record any that changed.
    for i, (_, v) in enumerate(category_map.items()):
        cat_orig = v[int(X_orig_ord[0, i])]
        cat_cf = v[int(X_cf_ord[0, i])]
        if cat_orig != cat_cf:
            delta_cat[feature_names[i]] = [cat_orig, cat_cf]
    if delta_cat:
        for k, v in delta_cat.items():
            print('{}: {} --> {}'.format(k, v[0], v[1]))
    print('\nNumerical:')
    # Numerical features occupy the last 4 columns; offset names accordingly.
    delta_num = X_cf_ord[0, -4:] - X_orig_ord[0, -4:]
    n_keys = len(list(cat_vars_ord.keys()))
    for i in range(delta_num.shape[0]):
        if np.abs(delta_num[i]) > eps:
            print('{}: {:.2f} --> {:.2f}'.format(feature_names[i+n_keys],
                                                 X_orig_ord[0,i+n_keys],
                                                 X_cf_ord[0,i+n_keys]))
describe_instance(X, explanation)
```
The person's income is predicted to be above $50k by increasing his or her capital gain.
| github_jupyter |
# Stateful Model Feedback Metrics Server
In this example we will add statistical performance metrics capabilities by leveraging the Seldon metrics server.
Dependencies
* Seldon Core installed
* Ingress provider (Istio or Ambassador)
An easy way is to run `examples/centralized-logging/full-kind-setup.sh` and then:
```bash
helm delete seldon-core-loadtesting
helm delete seldon-single-model
```
Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
Ambassador:
kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080
Istio:
kubectl port-forward -n istio-system svc/istio-ingressgateway 8003:80
```
!kubectl create namespace seldon || echo "namespace already created"
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
!mkdir -p config
```
### Create a simple model
We create a multiclass classification model - iris classifier.
The iris classifier takes an input array, and returns the prediction of the 4 classes.
The prediction can be done as numeric or as a probability array.
```
%%bash
kubectl apply -f - << END
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: multiclass-model
spec:
predictors:
- graph:
children: []
implementation: SKLEARN_SERVER
modelUri: gs://seldon-models/v1.13.0-dev/sklearn/iris
name: classifier
logger:
url: http://seldon-multiclass-model-metrics.seldon.svc.cluster.local:80/
mode: all
name: default
replicas: 1
END
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=multiclass-model -o jsonpath='{.items[0].metadata.name}')
```
#### Send test request
```
res=!curl -X POST "http://localhost:8003/seldon/seldon/multiclass-model/api/v1.0/predictions" \
-H "Content-Type: application/json" -d '{"data": { "ndarray": [[1,2,3,4]]}, "meta": { "puid": "hello" }}'
print(res)
import json
j=json.loads(res[-1])
assert(len(j["data"]["ndarray"][0])==3)
```
### Metrics Server
You can create a kubernetes deployment of the metrics server with this:
```
%%writefile config/multiclass-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: seldon-multiclass-model-metrics
namespace: seldon
labels:
app: seldon-multiclass-model-metrics
spec:
replicas: 1
selector:
matchLabels:
app: seldon-multiclass-model-metrics
template:
metadata:
labels:
app: seldon-multiclass-model-metrics
spec:
securityContext:
runAsUser: 8888
containers:
- name: user-container
image: seldonio/alibi-detect-server:1.13.0-dev
imagePullPolicy: IfNotPresent
args:
- --model_name
- multiclassserver
- --http_port
- '8080'
- --protocol
- seldonfeedback.http
- --storage_uri
- "adserver.cm_models.multiclass_one_hot.MulticlassOneHot"
- --reply_url
- http://message-dumper.default
- --event_type
- io.seldon.serving.feedback.metrics
- --event_source
- io.seldon.serving.feedback
- MetricsServer
env:
- name: "SELDON_DEPLOYMENT_ID"
value: "multiclass-model"
- name: "PREDICTIVE_UNIT_ID"
value: "classifier"
- name: "PREDICTIVE_UNIT_IMAGE"
value: "seldonio/alibi-detect-server:1.13.0-dev"
- name: "PREDICTOR_ID"
value: "default"
---
apiVersion: v1
kind: Service
metadata:
name: seldon-multiclass-model-metrics
namespace: seldon
labels:
app: seldon-multiclass-model-metrics
spec:
selector:
app: seldon-multiclass-model-metrics
ports:
- protocol: TCP
port: 80
targetPort: 8080
!kubectl apply -n seldon -f config/multiclass-deployment.yaml
!kubectl rollout status deploy/seldon-multiclass-model-metrics
import time
time.sleep(20)
```
### Send feedback
```
res=!curl -X POST "http://localhost:8003/seldon/seldon/multiclass-model/api/v1.0/feedback" \
-H "Content-Type: application/json" \
-d '{"response": {"data": {"ndarray": [[0.0006985194531162841,0.003668039039435755,0.9956334415074478]]}}, "truth":{"data": {"ndarray": [[0,0,1]]}}}'
print(res)
import json
j=json.loads(res[-1])
assert("data" in j)
import time
time.sleep(3)
```
### Check that metrics are recorded
```
res=!kubectl logs $(kubectl get pods -l app=seldon-multiclass-model-metrics \
-n seldon -o jsonpath='{.items[0].metadata.name}') | grep "PROCESSING Feedback Event"
print(res)
assert(len(res)>0)
```
### Cleanup
```
!kubectl delete -n seldon -f config/multiclass-deployment.yaml
!kubectl delete sdep multiclass-model
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from processing import processing
import os
import timeit
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
```
## Read in an Image
```
#reading in an image (matplotlib reads JPEGs as RGB uint8 arrays)
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)  # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    matplotlib loads images in RGB order, hence COLOR_RGB2GRAY. To display
    the returned array correctly use plt.imshow(gray, cmap='gray').
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # If the image was loaded with cv2.imread() (BGR order), use instead:
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side `kernel_size`."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Black out everything outside the polygon described by `vertices`.

    `vertices` should be a numpy array of integer points. Works for both
    single-channel and multi-channel input images; only pixels inside the
    polygon are kept, the rest of the image is set to black.
    """
    mask = np.zeros_like(img)
    # The fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]  # e.g. (255, 255, 255) for RGB
    else:
        fill_color = 255
    cv2.fillPoly(mask, vertices, fill_color)
    # Keep only the pixels where the mask is non-zero.
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw each Hough line segment onto `img` in place (mutates the image).

    This is the natural starting point for averaging/extrapolating the raw
    segments into two solid lane lines: separate segments by the sign of
    their slope (y2-y1)/(x2-x1) to assign them to the left or right lane,
    average each group, and extrapolate to the top and bottom of the lane.
    For semi-transparent lines, combine with weighted_img() below.

    NOTE: `color` is a mutable default; callers must not mutate it.
    """
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on a Canny edge image.

    `img` should be the binary output of a Canny transform. Returns a black
    RGB image of the same height/width with the detected segments drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 allows unicode identifiers such as α, β, γ.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the line image onto the original frame.

    `img` is typically the (mostly black) output of hough_lines(); the
    result is computed as initial_img * α + img * β + γ.
    NOTE: initial_img and img must have the same shape.
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
# List the available test images so we know what the pipeline will be run on.
os.listdir("test_images/")
def perform_magic_py(img):
    """Run the full lane-finding pipeline on one image file.

    Saves the grayscale, Canny and final Hough-overlay results under
    test_images/results/<stage>/<basename>.

    Parameters
    ----------
    img : str
        Path to the input image file.
    """
    image = mpimg.imread(img)
    filename = os.path.basename(img)
    print('This image is:', type(image), 'with dimensions:', image.shape)
    # 1. Grayscale, saved for inspection.
    mod_image = processing.grayscale(image)
    paths = os.path.join("test_images", "results", "grayscale", filename)
    plt.imsave(paths, mod_image)
    # 2. Gaussian blur followed by Canny edge detection, also saved.
    mod_image = processing.gaussian_blur(img=mod_image, kernel_size=5)
    mod_image = processing.canny(img=mod_image, low_threshold=50, high_threshold=150)
    paths = os.path.join("test_images", "results", "canny", filename)
    plt.imsave(paths, mod_image)
    # 3. Keep only a triangular region of interest covering the road ahead.
    left_bottom = [100, 535]
    right_bottom = [900, 535]
    apex = [500, 300]
    vertices = np.array([left_bottom, right_bottom, apex], dtype=np.int32)
    masked_edges = processing.region_of_interest(img=mod_image, vertices=vertices)
    # 4. Hough transform on the masked edges.
    # BUG FIX: the original passed theta=int(np.pi/180), which truncates to 0 —
    # a zero angular resolution is meaningless for the Hough accumulator.
    # theta must stay a float (1 degree in radians).
    mod_image = processing.hough_lines(img=masked_edges, rho=1, theta=np.pi/180,
                                       threshold=50, min_line_len=150, max_line_gap=150)
    # 5. Overlay the detected lines on the original image and save.
    mod_image = processing.weighted_img(mod_image, image)
    plt.imshow(mod_image)
    paths = os.path.join("test_images", "results", "hough_transform", filename)
    print(os.path.join("test_images", "results", "mod_image_" + filename))
    plt.imsave(paths, mod_image)
```
## Build a Lane Finding Pipeline
```
folder = 'test_images'
for filename in os.listdir(folder):
    # Only process the JPEG test images (endswith avoids matching names that
    # merely contain ".jpg" somewhere in the middle).
    if filename.endswith(".jpg"):
        # BUG FIX: the f-string payload was lost ("(unknown)"); report the
        # actual file being processed.
        print(f"processing file : {filename}")
        perform_magic_py(os.path.join(folder, filename))
```
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    # NOTE(review): this is still the template stub — `result` is never
    # assigned, so calling it as-is raises NameError until the pipeline
    # above defines `result`.
    return result
```
Let's try the one with the solid white lane on the right first ...
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# fl_image applies process_image frame-by-frame; it must accept and return RGB frames.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
# Embed the rendered video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
# Same frame-by-frame pipeline as the white-lane video, then embed the result.
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
# Optional challenge video: curved road, shadows, changing pavement color.
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
# Batch correction
What is batch correction? A "Batch" is when experiments have been performed at different times and there's some obvious difference between them. Single-cell experiments are often inherently "batchy" because you can only perform so many single cell captures at once, and you do multiple captures, over different days, with different samples. How do you correct for the technical noise without deleting the true biological signal?
## Avoiding batch effects
First things first, it's best to design your experiments to minimize batch effects. For example, if you can mix your samples such that there are multiple representations of samples per single-cell capture, then this will help because biological and technical variance will be represented across batches, rather than the two being confounded with each other.

[Hicks et al, preprint](http://biorxiv.org/content/early/2015/12/27/025528)
### Bad: Technical variance is the same as biological variance

Here, when you try to correct for batch effects between captures, it's impossible to know whether you're removing the technical noise of the different captures, or the biological signal of the data.
### Good: Technical variance is different from biological variance
The idea here is that, ahead of time, you would mix the cells from your samples in equal proportions and then perform cell capture on the mixed samples, so you would get different technical batches, but they wouldn't be confounded with the biological signals.

Here, when you correct for batch effects, the technical batches and biological signals are separate.
### If it's completely impossible to do multiple biological samples in the same technical replicate...
For example, if you have to harvest your cells at particular timepoints, here are some ways that you can try to mitigate the batch effects:
* Repeat the timepoint
* Save an aliquot of cells from each timepoint and run another experiment with the mixed aliquots
## Correcting batch effects
Okay so say your data are such that you couldn't have mixed your biological samples ahead of time. What do you do?
There's two main ways to approach batch correction: using groups of samples or groups of features (genes).
### Sample-batchy
This is when you have groups of samples that may have some biological difference between them, but also have technical differences between them. Say, you performed single-cell capture on several different days from different mice, of somewhat overlapping ages. You know that you have the biological signal from the different mice and the different ages, but you *also* have the technical signal from the different batches. BUT there's no getting around that you had to sacrifice the mice and collect their cells in one batch
### Feature-batchy
This is when you think particular groups of genes are contributing to the batch effects.
How to find these features:
* Numerical feature (e.g. RIN) associated with each sample
* Cell cycle genes (e.g. Buettner et al., 2015)
* (RUVseq) - Use an external dataset (e.g. bulk samples) to find non-differentially expressed genes and use them to correct between groups
```
from __future__ import print_function
# Interactive Python (IPython - now Jupyter) widgets for interactive exploration
import ipywidgets
# Numerical python library
import numpy as np
# PLotting library
import matplotlib.pyplot as plt
# Dataframes in python
import pandas as pd
# Linear model correction
import patsy
# Even better plotting
import seaborn as sns
# Batch effect correction
# This import statement only works because there's a folder called "combat_py" here, not that there's a module installed
from combat_py.combat import combat
# Use the styles and colors that I like
sns.set(style='white', context='talk', palette='Set2')
%matplotlib inline
```
### Feature-batchy
```
# Simulate a 10-samples x 20-genes expression matrix of N(0, 1) noise.
np.random.seed(2016)
n_samples = 10
n_genes = 20
half_genes = int(n_genes/2)
half_samples = int(n_samples/2)
size = n_samples * n_genes
genes = ['Gene_{}'.format(str(i+1).zfill(2)) for i in range(n_genes)]
samples = ['Sample_{}'.format(str(i+1).zfill(2)) for i in range(n_samples)]
data = pd.DataFrame(np.random.randn(size).reshape(n_samples, n_genes), index=samples, columns=genes)
# Add biological variance: the first half of the samples (Mouse_01) is shifted
# up on the first half of the genes and down on the second half; the second
# half of the samples (Mouse_02) gets the opposite pattern.
data.iloc[:half_samples, :half_genes] += 1
data.iloc[:half_samples, half_genes:] += -1
data.iloc[half_samples:, half_genes:] += 1
data.iloc[half_samples:, :half_genes] += -1
# Biological samples: map each sample to its mouse of origin.
mouse_groups = pd.Series(dict(zip(data.index, (['Mouse_01'] * int(n_samples/2)) + (['Mouse_02'] * int(n_samples/2)))),
                         name="Mouse")
mouse_to_color = dict(zip(['Mouse_01', 'Mouse_02'], ['lightgrey', 'black']))
mouse_colors = [mouse_to_color[mouse_groups[x]] for x in samples]
# Gene colors: one distinct hue per gene for the heatmap column annotations.
gene_colors = sns.color_palette('husl', n_colors=n_genes)
```
### Plot original biological variance data
```
# Heatmap of the clean data: rows are samples (annotated by mouse), columns
# genes; clustering is disabled so the block structure stays visible.
g = sns.clustermap(data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
                   col_colors=gene_colors,
                   cbar_kws=dict(label='Normalized Expression'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
def make_tidy(data, sample_groups):
    """Reshape a samples x genes matrix into long ("tidy") form.

    Each row of the result is one (Gene, Sample, Normalized Expression)
    observation, annotated with the sample's group from `sample_groups`
    (a Series indexed by sample name).
    """
    long_form = (data.unstack()
                     .reset_index()
                     .rename(columns={'level_0': 'Gene',
                                      'level_1': "Sample",
                                      0: "Normalized Expression"}))
    return long_form.join(sample_groups, on='Sample')
tidy = make_tidy(data, mouse_groups)
# One box per gene, grouped by mouse; the legend (20 genes) is hidden as too big.
fig, ax = plt.subplots()
sns.boxplot(hue='Gene', y='Normalized Expression', data=tidy, x='Mouse')
ax.legend_.set_visible(False)
```
### Add technical noise
```
# Choose odd-numbered samples to be in batch1 and even numbered samples to be in batch 2
batch1_samples = samples[::2]
batch2_samples = data.index.difference(batch1_samples)
batches = pd.Series(dict((x, 'Batch_01') if x in batch1_samples else (x, "Batch_02") for x in samples), name="Batch")
# Add random noise for all genes except the last two in each batch.
# BUG FIX: the original used DataFrame.ix, which mixed label-based row
# selection with positional column slicing and was removed in pandas 1.0;
# select the column labels explicitly with .loc instead.
noisy_data = data.copy()
noisy_data.loc[batch1_samples, noisy_data.columns[:-2]] += np.random.normal(size=n_genes-2, scale=2)
noisy_data.loc[batch2_samples, noisy_data.columns[:-2]] += np.random.normal(size=n_genes-2, scale=2)
# Assign colors for batches (one color per batch, expanded to one per sample).
batch_to_color = dict(zip(["Batch_01", "Batch_02"], sns.color_palette()))
batch_colors = [batch_to_color[batches[x]] for x in samples]
row_colors = [mouse_colors, batch_colors]
# Heatmap of the noisy data with both mouse and batch row annotations.
g = sns.clustermap(noisy_data, row_colors=row_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
                   col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
```
We can see that there's some batch effect - for batch1 (light grey), `Gene_15` is in general lower, and `Gene_01` is in general higher. And for batch2 (black), `Gene_16` is in general higher.
But, Gene_19 and Gene_20 are unaffected.
```
# Long-form view of the noisy data, annotated with each sample's batch.
tidy_noisy = make_tidy(noisy_data, mouse_groups)
tidy_noisy = tidy_noisy.join(batches, on='Sample')
tidy_noisy.head()
```
Lets plot the boxplots of data the same way, with the x-axis as the mouse they came from and the y-axis ad the genes
```
# Expression per gene grouped by mouse: the biological signal is now blurred by noise.
fig, ax = plt.subplots()
sns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_noisy, x='Mouse')
ax.legend_.set_visible(False)
```
We can see that compared to before, where we had clear differences in gene expression from genes 1-10 and 11-19 in the two mice, we don't see it as much with the noisy data.
Now let's plot the data a different way, with the x-axis as the *batch*
```
# Same data, but grouped by technical batch instead of mouse.
fig, ax = plt.subplots()
sns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_noisy, x='Batch')
ax.legend_.set_visible(False)
```
## How to quantify the batch effect?
```
# Per-batch mean expression of every gene.
# NOTE(review): `palette=batch_colors` passes one color per *sample* (10
# values) where seaborn expects one per hue level (2) — confirm intended.
fig, ax = plt.subplots()
sns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal',
              scale=0.5, palette=batch_colors)
# Same plot with default hue colors, overlaid with the across-batch mean in black.
fig, ax = plt.subplots()
sns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal', scale=0.5)
sns.pointplot(x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal', scale=0.75, color='k',
              linestyle=None)
```
## How to get rid of the batch effect?
### COMBAT
We will use "COMBAT" to get rid of the batch effect. What combat does is basically what we just did with our eyes and intuition - find genes whose gene expression varies greatly between batches, and adjust the expression of the gene so it's closer to the mean total expression across batches.
(may need to whiteboard here)
Create metadata matrix
```
# Sample-level metadata: one row per sample with its Batch and Mouse labels.
metadata = pd.concat([batches, mouse_groups], axis=1)
metadata

def remove_batch_effects_with_combat(batch, keep_constant=None, cluster_on_correlations=False):
    """Run COMBAT batch correction on the notebook-global `noisy_data` and plot diagnostics.

    Parameters
    ----------
    batch : str
        Column of the global `metadata` to treat as the batch to remove
        ('Mouse' or 'Batch').
    keep_constant : str or None
        Patsy formula term(s) naming the variable(s) of interest to protect
        from correction (e.g. 'Mouse', 'Mouse + Batch'); None protects nothing.
    cluster_on_correlations : bool
        If True, show a clustered heatmap of sample-sample Pearson
        correlations instead of the expression matrix itself.

    NOTE(review): relies on notebook globals (noisy_data, metadata, data,
    row_colors, gene_colors, batches, mouse_groups). Also, in the first
    condition `is not None or keep_constant in metadata` the second test is
    only evaluated when keep_constant is None (where it is always False), so
    it is dead code — `and` may have been intended; behavior is unchanged
    for the widget's option values, but confirm.
    """
    if keep_constant is not None or keep_constant in metadata:
        # We'll use patsy (statistical models in python) to create a "Design matrix" which encodes the batch as
        # a boolean (0 or 1) value so the computer can understand it.
        model = patsy.dmatrix('~ {}'.format(keep_constant), metadata, return_type="dataframe")
    elif keep_constant == 'null' or keep_constant is None:
        model = None
    # --- Correct for batch effects --- #
    # combat expects genes as rows, hence the transpose in and out.
    corrected_data = combat(noisy_data.T, metadata[batch], model)
    # Transpose so samples are the rows and the features are the columns
    corrected_data = corrected_data.T
    # --- Plot the heatmap --- #
    if cluster_on_correlations:
        g = sns.clustermap(corrected_data.T.corr(), row_colors=row_colors, col_cluster=True, row_cluster=True, linewidth=0.5,
                           vmin=-1, vmax=1, col_colors=row_colors, cbar_kws=dict(label='Pearson R'))
        plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
    else:
        g = sns.clustermap(corrected_data, row_colors=row_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
                           col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))
        plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
    # Uncomment the line below to save the batch corrected heatmap
    # g.savefig('combat_batch_corrected_clustermap.pdf')
    # --- Quantification of the batch effect correction --- #
    # Create a "tidy" version of the dataframe for plotting
    tidy_corrected = make_tidy(corrected_data, mouse_groups)
    tidy_corrected = tidy_corrected.join(batches, on='Sample')
    tidy_corrected.head()
    # Set up the figure
    # 4 columns of figure panels
    figure_columns = 4
    width = 4.5 * figure_columns
    height = 4
    fig, axes = plt.subplots(ncols=figure_columns, figsize=(width, height))
    # Plot original data vs the corrected data (scatter of all matrix entries)
    ax = axes[0]
    ax.plot(data.values.flat, corrected_data.values.flat, 'o',
            # Everything in the next line is my personal preference so it looks nice
            alpha=0.5, markeredgecolor='k', markeredgewidth=0.5)
    ax.set(xlabel='Original (Batchy) data', ylabel='COMBAT corrected data')
    # Plot the mean gene expression within batch in colors, and the mean gene expression across both batches in black
    ax = axes[1]
    sns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_corrected, y='Gene', orient='horizontal', scale=.5, ax=ax)
    sns.pointplot(x='Normalized Expression', data=tidy_corrected, y='Gene', orient='horizontal',
                  scale=0.75, color='k', linestyle=None, ax=ax)
    # Plot the gene expression distribution per mouse
    ax = axes[2]
    sns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_corrected, x='Mouse', ax=ax,
                # Adjusting linewidth for my personal preference
                linewidth=1)
    # Don't show legend because it's too big
    ax.legend_.set_visible(False)
    # --- Plot boxplots of average difference between gene expression in batches --- #
    # Get mean gene expression within batch for the original noisy data
    mean_batch_expression = noisy_data.groupby(batches).mean()
    noisy_batch_diff = (mean_batch_expression.loc['Batch_01'] - mean_batch_expression.loc['Batch_02']).abs()
    noisy_batch_diff.name = 'mean(|batch1 - batch2|)'
    noisy_batch_diff = noisy_batch_diff.reset_index()
    noisy_batch_diff['Data type'] = 'Noisy'
    # Get mean gene expression within batch for the corrected data
    mean_corrected_batch_expression = corrected_data.groupby(batches).mean()
    corrected_batch_diff = (mean_corrected_batch_expression.loc['Batch_01'] - mean_corrected_batch_expression.loc['Batch_02']).abs()
    corrected_batch_diff.name = 'mean(|batch1 - batch2|)'
    corrected_batch_diff = corrected_batch_diff.reset_index()
    corrected_batch_diff['Data type'] = 'Corrected'
    # Compile the two tables into one (concatenate)
    batch_differences = pd.concat([noisy_batch_diff, corrected_batch_diff])
    batch_differences.head()
    sns.boxplot(x='Data type', y='mean(|batch1 - batch2|)', data=batch_differences, ax=axes[3])
    # Remove right and top axes lines so it looks nicer
    sns.despine()
    # Magically adjust the figure panels (axes) so they fit nicely
    fig.tight_layout()
    # Uncomment the line below to save the figure of three panels
    # fig.savefig('combat_batch_corrected_panels.pdf')

# Interactive widget: choose the batch variable, the protected variable, and
# whether to cluster on sample-sample correlations.
ipywidgets.interact(
    remove_batch_effects_with_combat,
    batch=ipywidgets.Dropdown(options=['Mouse', 'Batch'], value="Batch", description='Batch to correct for'),
    keep_constant=ipywidgets.Dropdown(value=None, options=[None, 'Mouse', 'Batch', 'Mouse + Batch'],
                                      description='Variable of interest'),
    cluster_on_correlations=ipywidgets.Checkbox(value=False, description="Cluster on (Pearson) correlations between samples"));
```
Try doing these and see how they compare. Do you see similar trends to the original data? Do any of these create errors? Why would that be?
1. Batch to correct for = Batch, Variable of interest = Mouse
2. Batch to correct for = Mouse, Variable of interest = Batch
3. Batch to correct for = Batch, Variable of interest = Mouse + Batch
4. ... your own combinations!
With each of these try turning "Cluster on (Pearson) correlations between samples" on and off.
This is a nice way that we can visualize the improvement in reducing the batch-dependent signal.
## Feature-batchy
What if there are specific genes or features that are contributing to the batches?
This is the idea behind correcting for cell-cycle genes or some other feature that you know is associated with the data, e.g. the RNA Integrity Number (RIN).
Let's add some feature-batchy noise to our original data
```
# Fake a numerical covariate: RIN increases monotonically across samples.
metadata['RIN'] = np.arange( len(samples)) + 0.5
metadata
```
Add noise and plot it. Use the first and last genes as controls that don't have any noise.
```
# rin_noise = metadata['RIN'].apply(lambda x: pd.Series(np.random.normal(loc=x, size=n_genes), index=genes))
# Each sample's RIN is added as a constant shift to every gene except the
# first and last, which serve as noise-free controls.
rin_noise = metadata['RIN'].apply(lambda x: pd.Series(np.ones(n_genes-2)*x, index=genes[1:-1]))
rin_noise = rin_noise.reindex(columns=genes)  # re-insert the two control genes (as NaN)
rin_noise = rin_noise.fillna(0)  # controls get zero noise
g = sns.clustermap(rin_noise, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
                   col_colors=gene_colors, cbar_kws=dict(label='RIN Noise'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
```
Add the noise to the data and re-center so that each gene's mean is approximately zero.
```
rin_batchy_data = data + rin_noise
rin_batchy_data
# Renormalize the data so genes are 0-centered (per-gene z-score)
rin_batchy_data = (rin_batchy_data - rin_batchy_data.mean())/rin_batchy_data.std()
# Heatmap of the RIN-contaminated data.
g = sns.clustermap(rin_batchy_data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
                   col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
```
If we plot the RIN vs the RIN-batchy gene expression, we'll see that from this one variable, we see an increase in expression! Of course, we could also have created a variable that linearly decreases expression.
```
tidy_rin_batchy = make_tidy(rin_batchy_data, mouse_groups)
tidy_rin_batchy = tidy_rin_batchy.join(metadata['RIN'], on='Sample')
# Each colored line is one gene: expression rises with RIN (except the controls).
g = sns.FacetGrid(tidy_rin_batchy, hue='Gene')
g.map(plt.plot, 'RIN', 'Normalized Expression', alpha=0.5)
```
### Use RIN to predict gene expression
We will use linear regression to use RIN as our dependent variable and predict gene expression from there. Then we'll create a new, corrected matrix, with the influence of RIN removed
```
from __future__ import print_function
import six
from sklearn import linear_model
regressor = linear_model.LinearRegression()
regressor
# Use RIN as the "X" - the "dependent" variable, the one you expect your gene expression to vary with.
regressor.fit(metadata['RIN'].to_frame(), rin_batchy_data)
# Use RIN to predict gene expression
rin_dependent_data = pd.DataFrame(regressor.predict(metadata['RIN'].to_frame()), columns=genes, index=samples)
rin_dependent_data
from sklearn.metrics import r2_score
# explained_variance = r2_score(rin_batchy_data, rin_dependent_data)
# six.print_("Explained variance by RIN:", explained_variance)
rin_corrected_data = rin_batchy_data - rin_dependent_data
rin_corrected_data
# Somewhat contrived, but try to predict the newly corrected data with RIN
r2_score(rin_corrected_data, rin_dependent_data)
tidy_rin_corrected = make_tidy(rin_corrected_data, mouse_groups)
tidy_rin_corrected = tidy_rin_corrected.join(metadata['RIN'], on="Sample")
tidy_rin_corrected.head()
g = sns.FacetGrid(tidy_rin_corrected, hue='Gene')
g.map(plt.plot, 'RIN', 'Normalized Expression', alpha=0.5)
g = sns.clustermap(rin_corrected_data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5,
col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
sns.clustermap(rin_corrected_data.T.corr(), row_colors=mouse_colors,linewidth=0.5,
col_colors=mouse_colors, cbar_kws=dict(label='Pearson R'))
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);
```
Now the data doesn't vary by RIN! But... now we over-corrected and removed the biological signal as well.
### Other options to talk about
As you have seen, dealing with batch effects in single-cell data is supremely difficult and the best thing you can do for yourself is design your experiment nicely so you don't have to.
* [SVA](http://www.biostat.jhsph.edu/~jleek/papers/sva.pdf)
* Can specify that you want to correct for something (like RIN) but don't correct for what you're interested in. But... often in single cell data you're trying to find new populations, so you don't know *a priori* what you want to not be corrected for
* [RUVseq](http://www.nature.com/nbt/journal/v32/n9/full/nbt.2931.html)
* "RUV" = "Remove unwanted variation"
* With the "RUVg" version can specify a set of control genes that you know aren't supposed to change between groups (maybe from a bulk experiment) but they say in their manual not to use the normalized counts for differential expression, only for exploration, because you may have corrected for something you actually *DID* want but didn't know
* [scLVM](https://github.com/PMBio/scLVM)
* This method claims to account for differences in cell cycle stage and help to put all cells onto the same scale, so you can then do pseudotime ordering and clustering and all that jazz.
| github_jupyter |
# **PROGETTO**
# FEATURES NUMERICHE, CATEGORIALI, DATA
In questo notebook tratto ed introduco le features numeriche, categoriali e di tipo data. Le varie features verranno aggiunte in modo incrementale. Nel successivo notebook verranno introdotte ulteriore features: di tipo insiemistico e di tipo testuale.
Spesso è necessario valutare se vale la pena aggiungere certe features o è necessario capire che alternativa è migliore nella lavorazione di certe features : uso lo score sul validation set per capire cosa è meglio fare e per orientarmi tra le varie possibili scelte.
Verranno presi in considerazione 4 tipi di algoritmi di learning : kNN, regressione lineare, albero di decisione, random forest. Dunque per ogni possibile alternativa ho 4 modelli diversi e dunque ho 4 score sul validation set diversi. Scelgo l'alternativa e il modello che hanno score sul validation minore : questo è il modello migliore fino a quel momento ottenuto. Dunque la mia guida è sempre lo score sul validation e scelgo ciò che minimizza ciò.
Nel valutare questi 4 algoritmi faccio tuning su certi iperparametri.
1. Per kNN faccio tuning sul numero di vicini (lo indichiamo con k)
2. Per decision tree faccio tuning sul numero massimo di foglie (lo indichiamo con k)
3. Per random forest faccio tuning sul numero di alberi (lo indichiamo con k)
Linear regression invece non faccio tuning.
### FUNZIONI VALUTAZIONE E SELEZIONE MODELLI
Importo le **funzioni per la valutazione e selezione dei modelli**.
Le funzioni compute_train_val_test e model_selection_TrainValTest effettuano la valutazione tramite gli score su training/validation/test, rispettivamente su un modello solo o su una lista di modelli. Le funzioni compute_bias_variance_erroe e model_selection_BiasVarianceError effettuano la valutazione tramite il calcolo di bias/variance/error, rispettivamente su un modello solo o su una lista di modelli.
Nel progetto uso lo **score sul validation** come misura principale per selezionare un modello. Uso il calcolo di bias/variance/error come misura ulteriore di bontà, in particolare per capire come poter migliorare il modello stesso.
```
from valutazione_modelli import compute_train_val_test, model_selection_TrainValTest, compute_bias_variance_error, \
model_selection_BiasVarianceError
```
# PRIMA LETTURA E FEATURES NUMERICHE
Per prima cosa effettuiamo la prima lettura del dataset e aggiungiamo nel modello le features numeriche: "budget", "popularity", "runtime".
La funzione **cleaning_data_numeric** effettua la prima lettura e lavora ed estrae le features numeriche. Ritorna:
- *dataframe*, che è il dataframe pandas grezzo e completo di tutte le features. L'unica operazione che ho eseguito è quella di rimuovere le istanze con valori non significativi di "revenue". (*dataframe* mi serve perchè di fatto contiene tutto il dataset).
- *df*, che è il dataframe pandas che ha solo le features esplicative (X) fin'ora prese in considerazione e lavorate. Dunque per ora contiene solo le features numeriche lavorate opportunamente, come specificato nel notebook di analisi del dataset. Il dataframe *df* ha dunque 4 features: "budget", "budget_dummy", "popularity", "runtime".
- *y*, che è l'array numpy relativo a solo "revenue". I valori sono stati opportunamente scalati tramite MinMaxScaler.
In tutto il progetto uso sia dataframe pandas (come *dataframe* e *df*) che array numpy (come *y*). I dataframe li uso per gestire, lavorare e visualizzare meglio il dataset e le features selezionate. Gli array numpy li uso per valutare i modelli.
```
from lavorazione_dataset_NumCatData import cleaning_data_numeric
dataframe, df, y = cleaning_data_numeric()
df.info()
df.describe()
```
Definiamo l'array numpy *X*. *X* è semplicemente la versione numpy di *df*. Come detto, gli array numpy li uso per valutare i modelli. Dunque valuterò i modelli sempre passando *X* e *y*.
```
X = df.values
```
# PREDITTORE BANALE : revenue come funzione lineare del solo budget.
Il primo modello che prendiamo in considerazione è un modello che considera solo "budget" come feature per spiegare "revenue". Predittore banale. In particolare consideriamo la regressione lineare tra "revenue" e "budget".
```
from sklearn.linear_model import LinearRegression

# Trivial baseline: explain "revenue" with the "budget" feature alone.
model = LinearRegression(fit_intercept=True)
# Train/validation/test scores of the linear regression on "budget" only
# (column 0 of X). Fixed: the original called the undefined name
# compute_train_val_test1; the imported helper is compute_train_val_test.
train, val, test = compute_train_val_test(model, X[:,0:1], y)
print("MSE : ",val)
```
Questo è il nostro primo score ottenuto.
# SOLO FEATURES NUMERICHE
Consideriamo ora tutte e 4 le features numeriche messe in X. Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
Meglio del predittore banale.
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Tutti e 4 modelli sono migliori del predittore banale. Il modello migliore è random forest, con un MSE sul validation di 0.00316. (34 alberi)
# AGGIUNTA FEATURES CATEGORIALI
Aggiungiamo ora le features categoriali: "belongs_to_collection", "homepage", "original_language".
Le features "belongs_to_collection" e "homepage" ho già descritto come le tratto. Invece abbiamo due alternative su come trattare "original_language". Per prima cosa allora aggiungiamo "belongs_to_collection" e "homepage".
La funzione **add_categorial** prende il dataframe completo (*dataframe*) e ritorna newdf, che è il dataframe con le sole features categoriali selezionate e da aggiungere (appunto "belongs_to_collection" e "homepage"). Ritorna dunque solo le feature oggetto di studio, lavorate e processate.
Concatenando *df* e *newdf* in *df_tmp* otteniamo di fatto il dataframe con tutte le features fin'ora lavorate e selezionate. Sovrascriveremo *df* con *df_tmp* solo nel caso in cui ne valga la pena, ovvero solo nel caso in cui l'aggiunta di tali features migliora effettivamente il modello.
```
from lavorazione_dataset_NumCatData import add_categorial
import pandas as pd
newdf = add_categorial(dataframe) # newdf è il dataframe delle sole feature categoriali in questione, processate e lavorate.
df_tmp = pd.concat([df,newdf],axis=1)
```
Ora quindi abbiamo 6 features in tutto. Ecco le features aggiunte.
```
print(newdf.info())
newdf.describe()
X = df_tmp.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
kNN, albero di decisione e regressione lineare rimangono piuttosto stabili rispetto a prima. La random forest migliora il suo MSE sul validation. Il modello migliore ora risulta la random forest con anche le features categoriali. MSE : 0.00292. (49 alberi).
Dunque **aggiungiamo tali features**: riportiamo ciò su df.
```
df = df_tmp
```
**ORIGINAL_LANGUAGE**
Aggiungiamo ora la feature "original_language". Abbiamo due alternative su come trattare "original_language". La funzione **add_language_1** esegue la prima alternativa, mentre la funzione **add_language_2** esegue la seconda alternativa.
Entrambe le funzioni prendono in input il dataframe completo (*dataframe*) e ritornano *newdf*, ovvero il dataframe delle features selezionate e lavorate. add_language_1 --> *newdf_1* ; add_language_2 --> *newdf_2*.
Concateniamo *df* con *newdf_1* in *df_tmp_1* e *df* con *newdf_2* in *df_tmp_2*. Valutiamo quale alternativa è migliore e sovrascriviamo sulla base di ciò *df*.
**Alternativa 1**
"original_language" diventa semplicemente una feature dummy : vale 1 se il film è in lingua inglese, 0 altrimenti.
Aggiungiamo dunque un ulteriore feature.
```
from lavorazione_dataset_NumCatData import add_language_1
import pandas as pd
newdf_1 = add_language_1(dataframe)
df_tmp_1 = pd.concat([df,newdf_1],axis=1)
```
Ora abbiamo 7 features in tutto. Vediamo la feature aggiunta.
```
print(newdf_1.info())
# Fixed typo: the dataframe is named newdf_1 (newdf1 is undefined).
newdf_1.describe()
```
Andiamo a valutare.
```
X = df_tmp_1.values
```
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
C'è un miglioramento complessivo dei modelli. Ed in particolare la random forest migliora molto il suo MSE sul validation. Dunque ora il modello migliore risulta la random forest con anche "original_language" trattata nella prima alternativa. MSE : 0.002868. (39 alberi)
**Alternativa 2**
Le prime 7 lingue rispetto al revenue medio le tengo come valori categoriali distinti. Tutte le altre lingue le accorpo nella categoria "other_language". Ho dunque una variabile categoriale con 8 valori distinti: tale feature la trasformo in 8 variabili binarie(dummy).
Aggiungo in totale 8 feature in più.
```
from lavorazione_dataset_NumCatData import add_language_2
import pandas as pd
newdf_2 = add_language_2(dataframe)
df_tmp_2 = pd.concat([df,newdf_2],axis=1)
```
Abbiamo in totale 14 features. Vediamo le features aggiunte.
```
print(newdf_2.info())
newdf_2.describe()
X = df_tmp_2.values
```
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Gli score sono molto simili a quelli dell'alternativa 1. In ogni caso, lo score sul validation della random forest è peggiorato: dunque il MSE migliore c'era con l'alternativa 1. **Scegliamo l'alternativa 1.**
```
df = df_tmp_1
```
# AGGIUNTA FEATURES DATA
Aggiungiamo l'unica feature di tipo data: "release_date". Come visto, estraiamo da tale feature sia l'anno che il mese. Per l'anno lo trattiamo banalmente come feature numerica, per il mese invece abbiamo 6 alternative diverse da valutare.
Per ogni diversa alternativa c'è una diversa funzione. Ogni funzione prende in input il dataframe completo (*dataframe*) e ritorna *newdf*, ovvero il dataframe delle features selezionate e lavorate. Siccome abbiamo 6 diverse funzioni, otteniamo 6 diversi *newdf* : *newdf_1* *newdf_2* *newdf_3* *newdf_4* *newdf_5* *newdf_6*.
Concatenando *df* con i vari *newdf* otteniamo 6 diversi *df_tmp_i*. Valutiamo quale alternativa è migliore e sovrascriviamo sulla base di ciò *df*.
## ALTERNATIVA 1
Considero il mese semplicemente come una variabile categoriale a 12 livelli: da ciò 12 features dummy binarie.
```
from lavorazione_dataset_NumCatData import add_data_1
import pandas as pd
newdf_1 = add_data_1(dataframe)
df_tmp_1 = pd.concat([df,newdf_1],axis=1)
```
Ora quindi abbiamo 20 features in tutto. Ecco le features aggiunte.
```
print(newdf_1.info())
newdf_1.describe()
X = df_tmp_1.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score migliore è, come sempre, quello della random forest. MSE : 0.002699 (43 alberi). Lo score è migliorato rispetto al modello senza features della data.
## ALTERNATIVA 2
Tengo come valori distinti solo i primi 5 mesi rispetto alla numerosità di film: tutti gli altri mesi li accorpo nel livello "other_month". Ottengo quindi 6 livelli possibili --> 6 nuove features dummy.
```
from lavorazione_dataset_NumCatData import add_data_2
import pandas as pd
newdf_2 = add_data_2(dataframe)
df_tmp_2 = pd.concat([df,newdf_2],axis=1)
```
Ora quindi abbiamo 14 features in tutto. Ecco le features aggiunte.
```
print(newdf_2.info())
newdf_2.describe()
X = df_tmp_2.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 1. MSE : 0.002696(41 alberi).
## ALTERNATIVA 3
Considero come valori possibili solo i primi 5 mesi con media revenue maggiore. Tutti gli altri mesi li accorpo nel valore "other_month". Ottengo quindi 6 livelli possibili --> 6 nuove features dummy.
```
from lavorazione_dataset_NumCatData import add_data_3
import pandas as pd
newdf_3 = add_data_3(dataframe)
df_tmp_3 = pd.concat([df,newdf_3],axis=1)
```
Ora quindi abbiamo 14 features in tutto. Ecco le features aggiunte.
```
print(newdf_3.info())
newdf_3.describe()
X = df_tmp_3.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 2: per ora l'alternativa 3 è la migliore. MSE : 0.002686 (41 alberi).
## ALTERNATIVA 4
Creiamo una sola feature categoriale binaria (dummy) : 1 se il mese del film è nei primi 6 mesi con revenue maggiore ; 0 se è nei 6 con revenue peggiore.
```
from lavorazione_dataset_NumCatData import add_data_4
import pandas as pd
newdf_4 = add_data_4(dataframe)
df_tmp_4 = pd.concat([df,newdf_4],axis=1)
```
Ora quindi abbiamo 9 features in tutto. Ecco le features aggiunte.
```
print(newdf_4.info())
newdf_4.describe()
X = df_tmp_4.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score minore (sempre di random forest) non è migliore rispetto all'alternativa 3: l'alternativa 3 rimane la migliore.
## ALTERNATIVA 5
Dividiamo i mesi in 3 gruppi : il primo mese più rilevante ; gli altri 5 mesi più rilevanti ; i restanti 6 mesi più rilevanti.
Feature categoriale con 3 livelli --> dunque 3 features dummy.
```
from lavorazione_dataset_NumCatData import add_data_5
import pandas as pd
newdf_5 = add_data_5(dataframe)
df_tmp_5 = pd.concat([df,newdf_5],axis=1)
```
Ora quindi abbiamo 11 features in tutto. Ecco le features aggiunte.
```
print(newdf_5.info())
newdf_5.describe()
X = df_tmp_5.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score minore (sempre di random forest) non è migliore rispetto all'alternativa 3: l'alternativa 3 rimane la migliore.
## ALTERNATIVA 6
Dividiamo i mesi in 3 gruppi : primi 4 mesi migliori rispetto a revenue; successivi 4 mesi migliori ; ultimi 4 mesi. Dunque sempre 3 livelli, ma questa volta più bilanciati. Dunque abbiamo 3 livelli possibili per la features categorica mese: da ciò 3 features dummy binarie.
```
from lavorazione_dataset_NumCatData import add_data_6
import pandas as pd
newdf_6 = add_data_6(dataframe)
df_tmp_6 = pd.concat([df,newdf_6],axis=1)
```
Ora quindi abbiamo 11 features in tutto. Ecco le features aggiunte.
```
print(newdf_6.info())
newdf_6.describe()
X = df_tmp_6.values
```
Valutiamo i 4 algoritmi di learning su tale dataset.
### **1) KNN (con preprocessing)**
```
from sklearn.neighbors import KNeighborsRegressor
models = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True,
plottaTest=True, xvalues=range(1,50), xlabel="Numero vicini",
title="Valutazione modelli kNN con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,50))[best_model])
```
### 2) DECISION TREE REGRESSOR
```
from sklearn.tree import DecisionTreeRegressor
models = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(2,52), xlabel="Numero massimo foglie",
title="Valutazione modelli decision tree con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(2,52))[best_model])
```
### 3) LINEAR REGRESSION
```
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True) # Modello regressione lineare
train, val, test = compute_train_val_test(model, X, y)
print("MSE : ",val)
```
### 4) RANDOM FOREST
```
from sklearn.ensemble import RandomForestRegressor
models = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]
list_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,
xvalues=range(1,51), xlabel="Numero alberi",
title="Valutazione modelli random forest con score sul training/validation/test" )
print("MSE migliore: ",list_trainValTest[best_model][1]," | k: ",list(range(1,51))[best_model])
```
Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 3: l'alternativa 6 è la migliore. **In definitiva dunque scegliamo l'alternativa 6.** MSE : 0.002672 (39 alberi).
Riportiamo ciò su *df*.
```
df = df_tmp_6
```
**Il dataset lavorato ottenuto fin'ora è dunque il seguente. 11 features.**
```
df.info()
```
**Il migliore modello risulta random forest con 39 alberi. MSE : 0.002672.**
Nel notebook successivo si introducono le features di tipo insiemistico e testuale.
| github_jupyter |
```
import tensorflow
import pandas as pd
import time
import numpy as np
# ignore all info and warnings but not error messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# tensorflow libraries
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
# sklearn libraries are useful for preprocessing, performance measures, etc.
from sklearn import preprocessing
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import train_test_split
```
# Read Data
```
df = pd.read_csv('./features_30_sec.csv')
df.head()
df['label'].value_counts()
```
Split and scale dataset
```
columns_to_drop = ['label','filename', 'length']
def prepare_dataset(df, columns_to_drop, test_size, random_state):
    """Split the dataset into train/test parts and standard-scale the inputs.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw dataset; must contain a 'label' column plus the feature columns.
    columns_to_drop : list of str
        Columns excluded from the model inputs (e.g. ['label', 'filename', 'length']).
    test_size : float
        Fraction of rows held out as the test split.
    random_state : int
        Seed forwarded to train_test_split for reproducibility.

    Returns
    -------
    tuple
        (x_train_scaled, y_train, x_test_scaled, y_test) as numpy arrays,
        with labels encoded as integers 0..n_classes-1.
    """
    # Work on a copy so the caller's dataframe is not mutated by the
    # in-place label encoding below.
    df = df.copy()
    # Encode the labels from 0 to n_classes-1
    label_encoder = preprocessing.LabelEncoder()
    df['label'] = label_encoder.fit_transform(df['label'])
    # Split before fitting the scaler so test statistics never leak into training.
    df_train, df_test = train_test_split(df, test_size=test_size, random_state=random_state)
    # Fit the scaler on the training inputs only.
    x_train = df_train.drop(columns_to_drop, axis=1)
    y_train = df_train['label'].to_numpy()
    standard_scaler = preprocessing.StandardScaler()
    x_train_scaled = standard_scaler.fit_transform(x_train)
    # Scale the test inputs with the training-set statistics.
    x_test = df_test.drop(columns_to_drop, axis=1)
    x_test_scaled = standard_scaler.transform(x_test)
    y_test = df_test['label'].to_numpy()
    return x_train_scaled, y_train, x_test_scaled, y_test
x_train, y_train, x_test, y_test = prepare_dataset(df, columns_to_drop, test_size=0.3, random_state=0)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
```
# Q1
For all parts, `train`, `valid`, `test` will be defined as follows:
`train`:
- data that network weights are updated after seeing
`valid`:
- data that network weights are **NOT** updated after seeing
`test`:
- data that the network will see only once to evaluate generalizability
- either with `model.evaluate()`
- or training on (`train` + `valid`), validating on `test` after cross validation
- to get better estimate of model performance with chosen hyperparameters
As seen below,
```python
y_valid[2] == 2 # integer
```
instead of something like
```python
y_valid == [0 1 0 0 0] # one hot
```
so `sparse_categorical_crossentropy` should be used as opposed to `categorical_crossentropy`
```
# The held-out split serves as the validation set for Q1.
x_valid = x_test
y_valid = y_test

# Number of target classes (labels are already integer-encoded).
classes = len(df['label'].unique())
y_valid[2]

def run_network(
    train=(x_train, y_train),
    valid=(x_valid, y_valid),
    dropout=.3,
    neurons=16,
    batch_size=1,
    epochs=50,
    verbose=0,
    callbacks=[],
    summary=False
):
    """Build, compile and fit a one-hidden-layer classifier; return its History.

    With summary=True the model is only described, never trained.
    NB: the defaults capture the notebook globals at definition time.
    """
    # Drop whatever model a previous call left behind.
    tf.keras.backend.clear_session()

    net = tf.keras.Sequential()
    net.add(tf.keras.layers.InputLayer(input_shape=(x_train.shape[1])))
    net.add(tf.keras.layers.Dense(neurons, 'relu'))
    net.add(tf.keras.layers.Dropout(dropout))
    net.add(tf.keras.layers.Dense(classes))  # raw logits, no softmax

    if summary:
        return net.summary()

    net.compile(
        tf.optimizers.Adam(),
        metrics='accuracy',
        # Integer labels + logits output -> sparse CE with from_logits=True.
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    )
    return net.fit(
        x=train[0],
        y=train[1],
        verbose=verbose,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=callbacks,
        validation_data=valid
    )
```
Model architecture
```
# Print the architecture only (summary=True returns before any training).
history = run_network(summary=True)
%%time
# Baseline training run with the default hyperparameters.
history = run_network()
import matplotlib.pyplot as plt

def plot(history, suptitle_label=''):
    """Plot loss (left) and accuracy (right) curves for train vs. validation.

    history: dict of per-epoch lists, e.g. ``History.history`` from Keras,
        with keys 'loss', 'accuracy', 'val_loss', 'val_accuracy'.
    suptitle_label: optional extra first line for the figure title.
    """
    f, axes = plt.subplots(1, 2, figsize=(12, 4))
    # Leave more headroom when an extra label line is prepended.
    f.subplots_adjust(top=.75 if suptitle_label == '' else .7)

    # Pull the series out once and use them everywhere below
    # (the original extracted them but then re-indexed `history`).
    accuracy = history['accuracy']
    loss = history['loss']
    val_accuracy = history['val_accuracy']
    val_loss = history['val_loss']

    # Accuracy curves (right panel).
    axes[1].plot(accuracy)
    axes[1].plot(val_accuracy)
    axes[1].set_title('Model accuracy')
    axes[1].set(ylabel = 'accuracy', xlabel = 'Epoch')
    axes[1].legend(['Train', 'Valid'], loc='upper left')

    # Loss curves (left panel).
    axes[0].plot(loss)
    axes[0].plot(val_loss)
    axes[0].set_title('Model loss')
    axes[0].set(ylabel = 'Loss', xlabel = 'Epoch')
    axes[0].legend(['Train', 'Valid'], loc='upper left')

    axes[0].grid()
    axes[1].grid()

    # Summarise the best values reached during training in the title.
    title = (
        suptitle_label +
        'Min Training loss: {:.3f}\n'.format(np.min(loss)) +
        'Max Training accuracy: {:.3f}\n'.format(np.max(accuracy)) +
        'Min Validation loss: {:.3f}\n'.format(np.min(val_loss)) +
        'Max Validation accuracy: {:.3f}\n'.format(np.max(val_accuracy))
    )
    f.suptitle(title)
```
Observations:
- for both loss and accuracy:
- `valid` diverges from `train` at around epoch 10
- loss diverges a lot more than accuracy
Insights:
- model starts overfitting around epoch 10
- memorizing `train`
- learning information (weights updated) that does not generalise to predicting `valid`
- large divergence in loss does not mean large divergence in accuracy
- not inversely related (although this is intuitive)
- predictions may become more uncertain
- probability of predicting `1` when class is indeed `1` decreases but is still the most probable when compared against the other classes, so accuracy may plateau while loss increases
```
plot(history.history)
```
# Q2
- 30% of data that was `valid` is now `test`
- will be withheld from training and validation until hyperparameters are chosen
- 70% of data that was `train` will be split into 3 folds
- each fold will be taken as `valid` once, with the other folds as `train`
```
# Promote the former validation split to the final test set; the
# hyperparameter search below uses CV folds carved out of x_train.
x_test = x_valid
y_test = y_valid
# 3-fold CV repeated 10 times (RepeatedKFold's default n_repeats is 10).
rkfold = RepeatedKFold(n_splits=3, random_state=0)
```
RepeatedKFold is used
- for each of the 6 `batch_sizes`
- 3 fold is repeated 10 times
- average will be taken across this 10 sets of 3 folds as seen in the next cell
```
%%time
# Cross-validate each candidate batch size: 3 folds x 10 repeats,
# keeping all 30 History objects per batch size for later averaging.
kfold_history = {}
batch_sizes = [1, 4, 8, 16, 32, 64]
for batch_size in batch_sizes:
    kfold_history[batch_size] = []
    for train, valid in rkfold.split(x_train):
        # train/valid are integer row indices into the training arrays.
        train_x, valid_x = x_train[train], x_train[valid]
        train_y, valid_y = y_train[train], y_train[valid]
        history = run_network(
            train=(train_x, train_y),
            valid=(valid_x, valid_y),
            batch_size=batch_size)
        kfold_history[batch_size].append(history)
```
Observations:
- increasing `batch_size` results in poorer best performance throughout the 50 epochs
- larger `batch_size` decreases divergence
- `batch_size=64` has smallest divergence and thus is the optimal `batch_size`
Insights:
- convergence to global minimum of cost function is slower for larger `batch_size`
- but useful information is learnt
- weights updated to improve `train` is generalisable to `valid` as well
```
history = {}
for batch_size in batch_sizes:
    # Average every metric over this batch size's 30 CV runs
    # (3 folds x 10 repeats). Read the metric names from the current
    # batch_size's own history rather than the hard-coded key 1, so
    # the loop keeps working if `batch_sizes` changes.
    for key in kfold_history[batch_size][0].history.keys():
        history[key] = np.mean(
            [h.history[key] for h in kfold_history[batch_size]],
            axis=0
        )
    plot(history, suptitle_label=f'batch_size: {batch_size}\n')
import time

class EpochTime(tf.keras.callbacks.Callback):
    """Keras callback that records the wall-clock duration of every epoch.

    NOTE(review): the original subclassed ``keras.callbacks.Callback``, but
    only ``tf`` is imported in this notebook's visible scope, so
    ``tf.keras`` is used here — confirm no separate ``import keras`` exists
    earlier in the notebook.
    """

    def __init__(self, logs={}):
        super().__init__()  # let Keras initialise its base callback state
        self.time_taken = []  # one duration in seconds per completed epoch

    def on_epoch_begin(self, epoch, logs=None):
        self.start_time = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.time_taken.append(time.time() - self.start_time)
%%time
# Time one epoch for each batch size. The single EpochTime instance
# accumulates one duration per run: 6 batch sizes x 30 CV runs = 180.
et = EpochTime()
for batch_size in batch_sizes:
    for train, valid in rkfold.split(x_train):
        train_x, valid_x = x_train[train], x_train[valid]
        train_y, valid_y = y_train[train], y_train[valid]
        # One epoch is enough to estimate the per-epoch cost.
        history = run_network(
            train=(train_x, train_y),
            valid=(valid_x, valid_y),
            epochs=1,
            batch_size=batch_size,
            callbacks=[et]
        )
```
- `batch_size=64` is the fastest (since it capitalises on optimised matrix multiplication)
- multi core cpu parallelization
- gpu core parallelization
- `batch_size=64` also results in better generalisation as described before
Thus `batch_size=64` is optimal
```
# Group the 180 recorded epoch times by batch size: runs 0-29 belong to
# batch_sizes[0], 30-59 to batch_sizes[1], and so on
# (30 = 3 folds x 10 repeats per batch size).
table = {k: [] for k in batch_sizes}
for i, t in enumerate(et.time_taken):
    table[batch_sizes[i // 30]].append(t)
# Median epoch time (seconds) per batch size, rounded to 2 dp.
pd.DataFrame.from_dict(table).apply(lambda x: [round(np.median(x), 2)])
```
Now that `batch_size=64` is chosen
- retrain on `train` + `valid` and validate on `test` to get better estimate on model's performance as mentioned
- usually before deployment of model, it will be retrained on all of the data, with no data set aside for `valid` or `test`
- since estimate of model's performance is acceptable
```
# Final fit with the chosen batch size: train on the full training data,
# validating against the held-out test split (pointed at by x_valid/y_valid).
x_valid = x_test
y_valid = y_test
%%time
history = run_network(batch_size=64)
plot(history.history, suptitle_label=f'optimal batch_size: 64\n')
```
minibatch:
- capitalises on vectorisation to train faster (matrix multiplication of `batch_size` number of samples at one go)
stochastic:
- converges/diverges faster but often at local minima instead of global minima
model training:
- stochastic gradient descent is almost never used
# Q3
```
%%time
# Repeat the CV search, now over hidden-layer width (batch_size fixed at 32).
kfold_history = {}
neurons = [8, 16, 32, 64]
for neuron in neurons:
    kfold_history[neuron] = []
    for train, valid in rkfold.split(x_train):
        # train/valid are integer row indices into the training arrays.
        train_x, valid_x = x_train[train], x_train[valid]
        train_y, valid_y = y_train[train], y_train[valid]
        history = run_network(
            train=(train_x, train_y),
            valid=(valid_x, valid_y),
            batch_size=32,
            neurons=neuron
        )
        kfold_history[neuron].append(history)
```
Observations:
- `neurons=8`
- smallest divergence and thus is the optimal `neurons`
- similar to `batch_size=64`, valid accuracy is better than train accuracy.
- likely due to dropout activating only in training, thus valid outperforms train
- could also be that for `neurons=8`, `dropout=0.3` is too high
Insights:
- useful information is learnt
- weights updated to improve `train` is generalisable to `valid` as well
```
history = {}
for neuron in neurons:
    # Average every metric across this width's 30 CV runs. Read the
    # metric names from the current width's own history rather than the
    # hard-coded key 8, so the loop survives changes to `neurons`.
    for key in kfold_history[neuron][0].history.keys():
        history[key] = np.mean(
            [h.history[key] for h in kfold_history[neuron]],
            axis=0
        )
    plot(history, suptitle_label=f'neurons: {neuron}\n')
%%time
# Retrain once with the chosen hyperparameters, validating on the test split.
history = run_network(
    batch_size=64,
    neurons=8
)
plot(history.history, suptitle_label=f'optimal neurons: 8\n')
```
Other things that can be done:
- hidden layers
- vary number of hidden layers
- learning rate
- start with `lr>1e-3` which is the default for `Adam`
- use a learning rate scheduler to decay learning rate as epochs progress
- big steps towards global minima at early epochs as randomly initialised weights are likely far from good
- slowly reduce steps as epochs increase as global minima is likely close, want to give chance to find it
- optimizer
- change optimizer
- tweak optimizer params like `beta_1` for `Adam`
- usually small improvements and/or not worth the time tweaking
- others
- many other ways that will not be listed here
# Q4
```
def run_2_hidden_network(
    train=(x_train, y_train),
    valid=(x_valid, y_valid),
    dropout=.3,
    neurons=16,
    batch_size=1,
    epochs=50,
    verbose=0,
    callbacks=[],
    summary=False
):
    """Two-hidden-layer variant of run_network; same contract and defaults.

    With summary=True the model is only described, never trained.
    """
    # Start from a clean graph on every call.
    tf.keras.backend.clear_session()

    net = tf.keras.Sequential()
    net.add(tf.keras.layers.InputLayer(input_shape=(x_train.shape[1])))
    for _ in range(2):  # two identical Dense + Dropout hidden stacks
        net.add(tf.keras.layers.Dense(neurons, 'relu'))
        net.add(tf.keras.layers.Dropout(dropout))
    net.add(tf.keras.layers.Dense(classes))  # raw logits

    if summary:
        return net.summary()

    net.compile(
        tf.optimizers.Adam(),
        metrics='accuracy',
        # Integer labels + logits output -> sparse CE with from_logits=True.
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    )
    return net.fit(
        x=train[0],
        y=train[1],
        verbose=verbose,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=callbacks,
        validation_data=valid
    )
%%time
# Architecture summary only (no training happens with summary=True).
two_hidden_layer = run_2_hidden_network(summary=True)
%%time
# Train both depths with the chosen width so their curves can be compared.
two_hidden_layer = run_2_hidden_network(
    neurons=8
)
one_hidden_layer = run_network(
    batch_size=64,
    neurons=8
)
```
Observations:
- two hidden layer
- loss and accuracy is more unstable
- but better performance loss-wise and accuracy-wise
- possibly due to stochastic nature
- one hidden layer
- little loss and accuracy divergence
- stable (little fluctuation as opposed to two hidden layer)
Insights:
- this suggests 2 hidden layers overfits on `train` more as more layers in allows for the model to memorize the `train` data and not generalize to `valid` data
```
plot(one_hidden_layer.history, suptitle_label=f'1 hidden layer\n')
plot(two_hidden_layer.history, suptitle_label=f'2 hidden layers\n')
```
# Q5
```
%%time
# Same architecture trained with and without dropout regularisation.
no_dropout = run_network(dropout=0)
dropout = run_network()
```
- larger divergence without dropout
- When dropout is removed, the lack of regularisation causes the model to overfit even more on `train`
```
plot(no_dropout.history, suptitle_label=f'No Dropout\n')
plot(dropout.history, suptitle_label=f'Dropout\n')
```
Another approach to combat overfitting is `tf.keras.layers.BatchNormalization()`
Normalize output after first hidden layer to have each dimension in a similar scale
# Summary
Instead of using hand crafted features, deep features can be used. For example, audio files can be converted to Mel spectrograms (sample attached below). This can now be approached as a computer vision problem, where (deep) visual features are learnt from the convolutional layers and used for classification instead.
This is much more scalable as deep features are implicitly learnt; in other words, no engineer is needed to think up ways/features to describe each audio file such as harmonics, as these will be implicitly learnt in theory

| github_jupyter |
_Lambda School Data Science — Big Data_
# AWS SageMaker
### Links
#### AWS
- The Open Guide to Amazon Web Services: EC2 Basics _(just this one short section!)_ https://github.com/open-guides/og-aws#ec2-basics
- AWS in Plain English https://www.expeditedssl.com/aws-in-plain-english
- Amazon SageMaker » Create an Amazon SageMaker Notebook Instance https://docs.aws.amazon.com/sagemaker/latest/dg/gs-setup-working-env.html
- Amazon SageMaker » Install External Libraries https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-add-external.html
`conda install -n python3 bokeh dask datashader fastparquet numba python-snappy`
#### Dask
- Why Dask? https://docs.dask.org/en/latest/why.html
- Use Cases https://docs.dask.org/en/latest/use-cases.html
- User Interfaces https://docs.dask.org/en/latest/user-interfaces.html
#### Numba
- A ~5 minute guide http://numba.pydata.org/numba-doc/latest/user/5minguide.html
## 1. Estimate pi
https://en.wikipedia.org/wiki/Approximations_of_π#Summing_a_circle's_area
### With plain Python
```
import random

def monte_carlo_pi(nsamples):
    """Estimate pi by sampling points uniformly in the unit square.

    The fraction of points landing inside the quarter unit circle
    approximates pi/4, so the estimate is 4 * hits / nsamples.
    """
    hits = 0
    for _ in range(int(nsamples)):
        px, py = random.random(), random.random()
        # A boolean adds as 0 or 1, counting the in-circle samples.
        hits += (px**2 + py**2) < 1.0
    return 4.0 * hits / nsamples
%%time
monte_carlo_pi(1e7)
```
### With Numba
http://numba.pydata.org/
```
from numba import njit
```
## 2. Loop a slow function
### With plain Python
```
from time import sleep

def slow_square(x):
    # Simulate an expensive computation: one second of blocking work per call.
    sleep(1)
    return x**2

%%time
# Sequential baseline: 16 calls x 1 s each -> roughly 16 s total.
[slow_square(n) for n in range(16)]
```
### With Dask
- https://examples.dask.org/delayed.html
- http://docs.dask.org/en/latest/setup/single-distributed.html
```
from dask import compute, delayed
```
## 3. Analyze millions of Instacart orders
### Download data
https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2
```
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
```
### With Pandas
#### Load & merge data
```
import pandas as pd

%%time
# Concatenate the "prior" and "train" order-product tables into one frame.
order_products = pd.concat([
    pd.read_csv('order_products__prior.csv'),
    pd.read_csv('order_products__train.csv')])
order_products.info()
order_products.head()

products = pd.read_csv('products.csv')
products.info()
products.head()

%%time
# Attach product names; the merge key defaults to the shared 'product_id'.
order_products = pd.merge(order_products, products[['product_id', 'product_name']])
order_products.head()
```
#### Most popular products?
#### Organic?
### With Dask
https://examples.dask.org/dataframe.html
```
import dask.dataframe as dd
from dask.distributed import Client
```
#### Load & merge data
https://examples.dask.org/dataframes/01-data-access.html#Read-CSV-files
http://docs.dask.org/en/latest/dataframe-performance.html#persist-intelligently
#### Most popular products?
#### Organic?
## 4. Fit a machine learning model
### Load data
```
%cd ../ds-predictive-modeling-challenge
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

train_features = pd.read_csv('train_features.csv')
train_labels = pd.read_csv('train_labels.csv')
# Keep only the numeric feature columns; categoricals are ignored here.
X_train_numeric = train_features.select_dtypes(np.number)
y_train = train_labels['status_group']
```
### With 2 cores (like Google Colab)
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
```
# n_jobs=2 matches the 2 vCPUs available on Colab; oob_score gives a free
# validation estimate from each tree's out-of-bag samples.
model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=2, random_state=42, verbose=1)
model.fit(X_train_numeric, y_train)
print('Out-of-bag score:', model.oob_score_)
```
### With 16 cores (on AWS m4.4xlarge)
## ASSIGNMENT
Revisit a previous assignment or project that had slow speeds or big data.
Make it better with what you've learned today!
You can use `wget` or Kaggle API to get data. Some possibilities include:
- https://www.kaggle.com/c/ds1-predictive-modeling-challenge
- https://www.kaggle.com/ntnu-testimon/paysim1
- https://github.com/mdeff/fma
- https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2
Also, you can play with [Datashader](http://datashader.org/) and its [example datasets](https://github.com/pyviz/datashader/blob/master/examples/datasets.yml)!
| github_jupyter |
# Введение в искусственные нейронные сети
# Урок 1. Основы обучения нейронных сетей
## Содержание методического пособия:
<ol>
<li>Общие сведения о искусственных нейронных сетях</li>
<li>Место искусственных нейронных сетей в современном мире</li>
<li>Области применения</li>
<li>Строение биологической нейронной сети</li>
<li>История искусственных нейронных сетей</li>
<li>Небольшой пример по обучению простой нейронной сети</li>
<li>Основы обучения нейронных сетей</li>
<li>Инициализация весов. Функции активации</li>
<li>Обратное распространение ошибки и градиентный спуск</li>
<li>Пример построения двухслойной нейронной сети на numpy</li>
</ol>
## Общие сведения об искусственных нейронных сетях
В даннном разделе мы узнаем, что такое нейронные сети и из каких компонентов они состоят.
Искусственная нейронная сеть — это программная реализация математической модели биологической нейронной сети. Ее цель — извлечь пользу из переданных ей данных. Например, сказать, что на фотографии, или сделать какой-либо прогноз.
Нейронные сети состоят из входных, внутренних и внешних слоев. Внутренние слои отвественны за обучение. Входные слои позволяют ввести данные во внутренние слои, а внешние слои позволяют вывести полезный вывод из данных. Сами слои состоят из отдельных нейронов, которые связаны с другими нейронами и по определенным алгоритмам способны усиливать с ними связь.
Через нейронные сети в процессе обучения многократно пропускают какие-либо данные, например изображения и указывают, что на них. За счет работы различных алгоритмов в нейронной сети вырабатывается такая система связей между нейронами, которая позволяет в последствии при переданных ей новых данных получить ответ, что на фотографии.
Но распознавание объектов на фотографиях далеко не единственная сфера применения нейронных сетей. Существует множество видов различных нейронных сетей для разных задач. Основные из этих видов мы разберем в данном курсе. Детальное рассмотрение процесса обучения нейронной сети и базовые сведения об ее архитекутуре будут разобраны во второй половине данного методического пособия.
## Место искусственных нейронных сетей в современном мире
В данном разделе мы разберем какие новшества принесли искусственные нейронные сети в современный мир. Долгое время создание искусственого интеллекта оставалось частью научной фантастики и несмотря на многие предсказания прошлого времени, что искусственный интеллект вот-вот будет создан, искусственный интеллект был создан совсем не давно. По крайней мере люди стали говорить, что они пользуются искусственным интеллектом. Как миним можно сказать, что искусственный интеллект создан с маркетинговой точки зрения. Но, не только с маркетинговой. Отчасти со сторого научной точки зрения он также создан — в 2014 году был пройден Тест Тьюрига.Тем не менее сейчас мы можем говорить только лишь о том, что создан специализированный искусственный интеллект, способный выполнять только определенные задачи, которые раньше было свойственно выполнять только человеку. Т.е. создан так называеймый «узкий искусственный интеллект». Создание общего искусственного ителлекта, т. е. программы, которая могла бы мыслить как человек, по прежнему остается фантастикой.
Искусственные нейронные сети, которые являются предметом изучения данного курса являются основой революции в области «узкого искусственного интеллекта» и являются одной из главных надежд для создания общего искусственного интеллекта в будущем. Однако искусственвенный интеллект может строиться не только на базе искусственных нейронных сетей. Более того как мы увидем в разделе посвященным истории, данных подход долгое время считался неперспективным. Так почему же революция совершена именно благодаря им, а не другим подходам? Дело в том, что компьютерные программы давно могут автоматизировать различную работу человека. Им можно дать определенный алгоритм работы и они будут его надежно выполнять. Программы как известно получают на вход данные, обрабатывают их с помощью определенного алгоритма и на выходе опять выдают данные.
Но возьмем к примеру завод. На нем есть много труда, который можно было автоматизировать. Не трудно составить алгоритм, например, для промышленного манипулятора, который будет переносить определенные тяжелые предметы. С помощью технологий перевода данных обработанных процессором в движения манипулятора можно заставить его совершить определенную полезное работу. Но как ввести данные для обработки? Как дать понять манипулятору какой предмет ему нужно взять? Здесь на помощь может прийти камера.
Долгое время различные алгоритмы компьютерного зрения справлялись с определенными несложными задачами наподобее определения предметов через цвет или его геометрическую форму.
Но что если нужно работать с предметами разного цвета и формы, но объеденных другими общими свойствами? Например сортировать яблоки и груши. Они могут быть разной формы и цвета, но тем не менее нужно отделить отдно от другого. Человеку не трудно справиться с подобной задачей но для искусственного интеллекта долгое время это оставалось не посильной задачей. Однако с применением глубоких искусственных нейронных сетей начиная с 2012 года выполнение этой задачи стало реальностью. Но не только в сфере распознования образов важную роль сыграли нейронные сети. Также в сфере распознования речи. И не только в сфере распознования, сейчас есть нейронные сети которые могут создавать изображения и синтезировать голос. Также они применяются в многих других сферах.
Подобный переворот случился благодаря тому, что нейронные сети стали повторять собою нейронные сети человеческого мозга, который как раз может легко справляться с подобной задачей. Об этом мы подробнее поговорим в разделе «Строение билогической нейронной сети».
Сейчас же давайте перейдем к разделу «Применение нейронных сетей», чтобы понять какие задачи они могут выполнять и к решению каких проблем реального мира нужно быть готовым специалистам по нейронным сетям.
## Применение нейронных сетей
В нашем курсе мы будем проходить различные виды нейронных сетей и будем рассматривать в соотвествующих уроках применение каждого вида отдельно. Здесь же в общих чертах нарисуем картину их применения.
Нейронные сети применяются в области компьютерного зрения. Начиная с обычных программ распознающих предметы в производстве или распознование личности на телефоне, заканчивая крупными комплексами компьютерного зрения, которые применяются в беспилотных автомобилях.
Нейронные сети применяются для распознования голоса — Siri, Google помощник, Alexa, Алиса и прочие голосовые помощники распознают голос человек с помощью нейронных сетей. Их применение не ограничивается только лишь распознованием голоса. В последние годы достигнут сущесветнный прогресс в синтезе голоса. Применяются они также в области машинного перевода. Прогресс в этой области как раз связан с ними.
Комбинация из нескольких нейронных сетей позволяет описать содержание фотографии.
Нейронные сети применяются в медицине — для анализа снимков и заключений о болезни человека в некоторых областях лучше чем человек.
Нейронные сети также применяются для анализа поведения покупателей, а также на бирже. Находят они применения также в прогнозах погоды.
Нейронные сети находят также применение для творчества. Уже сейчас есть художники которые продают картины которые рисуют нейронные сети.
Есть и другие интересные области применения нейронных сетей. Например они используются для придания цвета старым фотографиям или фильмам. В улучшении качетства изображений. Для чтения по губам. Для генерации фотографий и много другого.
## Строение биологической нейронной сети
Термин «Нейрон» был введен Г. В. Вальдейером в 1891 г. Что из себя предствляет человечекий нейрон? Часто можно услышать его сравнение с транизистором. Однаком более убедительным является точка зрения нейрофизиологов утверждающих, что нейрон это полноценный компьютер. Человеческий нейрон также как и компьютер призван обрабатывать сигналы.
То, что человеческое сознание является результатом работы нейронов было впервые отмечено в работах Александра Бейна (1873) и Уильяма Джеймса (1890). В работах Бейна впервые акт запоминания был сформулирован как акт укрепление нейронных связей.
В нейронах мозга есть аксоны, через которые в него попадают сигналы, у искусственных нейронов также есть подобные входы. У нейронов мозга есть выходной компонент дендрит и искуственных нейронов также есть выходные компоненты. У нейронов человеческого мозга насчитывается порядка 10 000 связей с другими нейронами у искусственных нейронов их также может быть много. И биологической нейронной сети и в искусственной в случае если сумма поступающих сигналов в нейрон превышает определенный порог то сигнал передается следующему нейрону.
Искусственные нейронные сети работают подобно биологическим — через них пропускаются данные и те связи которые ведут к нужным результатам укрепляются. Но если в человеческом мозге проихсходит засчет утолщения этих связей, то в программных нейронных сетях это происходит за счет увеличение чисел символизирующих эти связи.
## История искусственных нейронных сетей
Представление о нейронных сетях как главном способе создания искусственного интеллекта сложилось далеко не сразу и в данном разделе мы осветим основные вехи этой технологии.
Математическую модель искусственного нейрона была предложена У. Маккалоком и У. Питтсом в 50-х годах 20 века.
В виде компьютерной системы нейронная сеть была впервые реализована Фрэнком Розенблаттом в 1960 году. Фрэнк Розенблат создал «Марк-1», который являлся программно-аппаратным комлексом и воплащал в себе простую нейронную сеть в один слой. Можно отметить, что современные сети (на момент ноября 2019 г.) включают в себя 50 — 150 слоев, а экспериментальные достигают размера более 1000 слоев.
Однако волна энтузиазма 60-х годов в отношении искусственных нейронных сетей сменилась скепсисом в отношении них из-за трудностей в совершении определенных логических операций и невозможности получать практические результаты. Доминирующим подходом для создания искусственного интеллекта стали экспертные системы. Они являлись по сути продвинутой энциклопедией знаний в той или иной сфере.
Однако в 2012 г. появились глубокие нейронные сети. Т.е. сети с количеством внутренних слоев больше одного. Сначала кол-во слоев таких нейронных сетях было примерно 3-10. Но в силу эффективности подхода по увеличению кол-ва слоев, быстро появились нейронные сети кол-во слоев которых стало исчисляться десятками.
Именно с 2012 г. нейросети стали считаться доминирующим способом в решении многих задач искусственного интеллекта. Стоит задаться вопросом: почему именно в этот период? Прорыв был обеспечен более продвинутой архитектурой, возросшими количеством хранимых компьютерами данных для тренировки нейронных сетей, а также возросшей вычислительной мощностью компьютеров. Также можно отметить появления в эту пору возможности использовать видеокарты для тренировки нейронных сетей, которые оказались лучше приспособленными для решения этих задач чем классические CPU.
## Общие сведения о том, как происходит обучение нейронных сетей
Давайте возьмем какую-нибудь типичную задачу для нейросети и попробуем разобрать как она могла бы ее решить. В качества примера задачи можем взять типичный пример — определить на фотографии находиться кот или собака.
Сначала давайте подумаем как это данная задача решалась бы без нейронной сети. Вспомним, что фотография это набор пикселей. Пиксели в компьютере репрезентуются матрицей чисел. Если бы все коты были синего цвета, а собаки красного мы могли бы просто детектировать числа отвественные за данные цвета в изображении и на основаннии этого делать выводы о том кот или собака расположены на фотографии. В действительности как мы понимаем это не так. У кота очень много отличительных черт как и у собаки. Перечесление этих уникальных свойсвт займет долгое время. Но стоит учесть вот какое обстоятесльво - усики и лапки и шерстка есть у многих животных. И скорее всего все что нам остается делать это описывать размеры этих усиков, лапок и т. д. Изучать их угла наклона и т. д. Попытки делать это вручную были, как раз до эпохи нейронных сетей. Но результаты были не высокие. Как вы понимаете объем признаков слишком большой.
Таким образом задача нейронной сети содержать в себе необходимый набор признаков которые позволяет ей отличить один объект от другого. В данном уроке будет разобран пример разработки нейронной сети, которая будет обучаться различать разновидности цветка ириса. Данные виды похожи, они состоят из одинаковых элементов, но эти элементы имеют разные размеры в случаи каждого отдельного вида. На этом простом примере мы сможем понять как работают более сложные нейронные сети.
Но встает вопрос как поместить в нейронную сеть необходимый набор признаков? Ответ на этот вопрос будет даваться в течении всего данного курса и всеравно не будет исчерпывающим. Поскольку для это придется обучать нейронную сеть, а ее обучение порой преобретает характер искусства нежели набор предписаний.
Тем не менее в процессе обучения всегда нужно пройти ряд этапов, которые будут общими для любого процесса обучения.
Нам определиться с архитектурой нейронной сети. От удачно выбранной архитектуры будет зависеть насколько быстро мы сможем обучить нейронную сеть, насколько точной она будет, а также сможем ли мы ее обучить в принципе.
Архитектура нейронной сети как вы догадываетесь зависит от задачи. В зависимости от задачи нам нужно будет выбрать:
- Количество слоев из которых будет состоять нейронная сеть
- Сколько будет нейронов в этих слоях
Кроме этого нам нужно будет подобрать верным образом компоненты, за счет которых будет обучаться нейронная сеть (подробнее о них можно будет узнать в следующих разделах данного мет. пособия)
- Систему инициализации весов нейронов
- Функцию активации весов нейронов
- Алгоритм корректировки весов нейронов
Это безусловно список самых общих компонентов с которыми нам нужно будет определиться, но на данной стадии обучения надо полагать нам их будет достаточно.
Архитектуры нейронных сетей мы будем изучать на отдельных уроках этого курса. В этом же уроке мы разберем следующие неотъемлимые компоненты обучения нейронной сети любой архитектуры — инициализация весов, их корректировка, рассмотрим виды функций активации весов нейронной сети, разберем, что такое градиентный спуск и метод обратного распространения ошибки.
## Инициализация весов. Функции активации

Мы с вами выяснили, что в процессе обучения нейронной сети в ней должны вырабататься признаки по которым она сможет определять, что на фотографии. Но как эти признаки будут репрезентованы в нейронной сети? Все эти признаки будут состовлять определенную сложную матрицу чисел. Настолько сложную, что человеку не под силу ее проанализировать. Какие это будут признаки? Это тоже неизвестно. Их может быть так много и они могут быть такими специфическими, что и описать их будет трудно. Для этого и нужны нейросети, они берут на себя содержание и описание в себе этих признаков.
Однако все эти признаки записываются через систему весов нейронов. Веса нейрона отражают толщину связи этого нейрона с др. нейроном. Из совокупности этих связей и состоят признаки. Т.е. другими словами обучить нейронную сеть значит найти нужные веса для ее нейронов.
Первоначально веса можно задать случайными числами. И в процессе обучения они будут все больше и больше подходить под решения задачи. Программно это можно показать на примере следующего фрагмента кода.
```
import numpy as np
import random
import matplotlib.pyplot as plt

# Random-number generation for weight initialisation (seeded for
# reproducibility); each weight is drawn uniformly from [-1, 1).
np.random.seed(1)
synaptic_weights = 2 * np.random.random((3, 1)) - 1
w1 = 2 * np.random.random((1, 2)) - 1
w2 = 2 * np.random.random((2, 2)) - 1
# Inspect the initial weight values.
print(w1)
print(w2)
```
Но мало просто присвоить определенные веса нейронам. Необходимо также определить функцию по которой будет активироваться нейроны. Выбранная функция активация будет одинаковой для всех нейронов. С помощью нее можно определять с какой силой нужно подействовать на нейрон, чтобы он активировался и передал сигнал дальше.

На картинке выше показан нейрон в виде формулы. Активация его как говорилась зависит от входных данных, его весов и некоего порогого значения.
Функций таких существует очень много - сигмоида, линейная, ступенчатая, ReLu, tahn и т. д. Нам скорее сейчас нужно понять их суть изложенную выше нежели чем научиться их выбирать. Отметим лишь следующее. Для учебных целей часто можно встретить сигмоиду. Для реальных задач — ReLu.

В данном фрагменте кода мы можем посмотреть как программно реализовать функцию сигмоид -
```
# Sigmoid activation: squashes any real input into the interval (0, 1).
def sigmoid(x):
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
# For the picture: sample the curve on [-10, 10] and draw it.
D = 10
X = np.linspace(0-D,0+D,20)
Y = sigmoid(X)
plt.plot(X , Y ,'-g',label = 'сигмоид' )
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
```
А вот, как например на Python, можно реализовать упомянутую выше функцию активации Relu -
```
# ReLU activation: passes positive inputs through unchanged, zeroes the rest.
def ReLU(x):
    positive = x > 0
    return x * positive
# Plot ReLU over a symmetric interval around zero.
D = 10  # half-width of the plotted x-range
X = np.linspace(0-D,0+D,21)  # 21 sample points in [-10, 10]
Y = ReLU(X)
plt.plot(X , Y ,'-g',label = 'ReLU' )
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
```
## Обратное распространение ошибки и градиентный спуск
Давайте, прежде чем начнем разбирать обратное распространение и градиентный спуск, скажем несколько слов о тех типах архитектур нейронной сети, которые будут использованы в данном уроке. Это простые нейронные сети. С точки зрения архитектуры их можно отнести к полносвязным нейронным сетям, так как все нейроны связаны между собой. С другой стороны, их можно отнести к нейронным сетям прямого распространения (feed forward). Сети прямого распространения подразумевают распространение сигнала от входа к выходу, в отличие от рекуррентных нейронных сетей, где во внутренних слоях сигналы могут ходить по циклу. О плюсах и минусах полносвязных сетей мы поговорим в уроке, посвященном сверточным нейронным сетям, поскольку данные нейронные сети отошли от этого подхода и нам нужно будет выяснить почему.
После того как на выходе нейронной сети мы получили определенное цифровое значение нам нужно сравнить его с искомым. Мы можем посчитать насколько в количественом выражение ошиблась нейросеть. Задача обратного распространения ошибки пройтись от выхода ко входу и скорректировать веса нейронов. Это процесс происходит множество раз в процесс обучения.
Можно сказать что процесс обучения нейронной сети это попытка оптимизировать веса
нейронной сети до тех пор пока не будет достигнута минимальная степень ошибки. Для этого хорошо подходит такой алгоритм как градиентный спуск. Суть данного метода заключается в том, чтобы искать такие числовые параметры при которых значение ошибки достигнет нуля. Градиентным он называется потому что это процесс пошаговый, требующий одно вычисление за другим. Спуском он называется потому что значение ошибки должно быть как можно меньше.
Обратите внимание на следующий график из него видно, что есть определенное число по шкале весов которому соотсвествует минимальное значение по шкале Error. Это число и нужно находить в процессе обучения нейронных сетей.

Давайте попробуем реализовать программного градиентный спуск, чтобы лучше понять как он работает.
```
'''
Source code for lesson 1.
Demonstration of gradient descent on y = x**4 - 3*x**3.
'''
# starting point of the descent
start_point = 1
# step size (learning rate)
learn_r = 0.01
# convergence tolerance: stop once successive points are this close
precision = 0.0001
# gradient of y = X**4 - 3 * X**3
gr_func = lambda x: 4 * x**3 - 9 * x**2
# sample the objective for plotting (window centred near x = 2.2)
D = 1
X = np.linspace(2.2-D,2.2+D,20)
Y = X**4 - 3 * X**3
# current iterate
next_point = start_point
iter = 0  # NOTE(review): shadows the builtin `iter`
x = []  # trajectory of visited points, kept for plotting
x.append(next_point)
plt.figure(figsize=(16,2))
plt.plot(X, Y ,'r',label = 'Y(X)' )
# maximum number of iterations
n = 150
for i in range(n):
    current_point = next_point
    # step opposite to the gradient direction
    next_point = current_point - learn_r*gr_func(current_point)
    x.append(next_point)
    # print(next_point)
    iter += 1
    # progress report; loop stops early once the precision is reached
    print(f"Итерация: {iter}")
    print(f"Текущая точка {current_point}| След-я точка {next_point}")
    print(f"Дистан-я между текущей точк. и след. {abs(current_point - next_point)}")
    print("--------------------------------------------------------")
    if(abs(current_point - next_point) <= precision):
        break
print(f"минимум {next_point}, количество затраченных итераций: {iter}")
X_grad = np.array(x)
# overlay the descent trajectory on the objective curve
plt.plot(X_grad , (X_grad **4 - 3 * X_grad **3) ,'-*g',label = 'GD' )
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
```
Метод обратного распространения (backpropagation) — самый популярный способ обучения нейронных сетей, однако у него есть несколько альтернатив: метод упругого распространения (Resilient propagation, Rprop) и генетический алгоритм (Genetic Algorithm). Rprop для корректировки весов и смещений использует знак градиента, а не его значение, а генетический алгоритм для задач оптимизации и моделирования использует случайный подбор.
## Небольшой пример по обучению простой нейронной сети
Рассмотрим создание простой нейронной сети. Данная нейронная сеть будет обучаться предсказывать четвертое число на основании первых трех. Для этого мы ей передадим обучающий набор данных из четырех последовательностей чисел. В каждой последовательности будет три первых числа в качестве входных данных и четвертое число, которое следует за этими данными.
После обучения нейронной сети мы просим пользователя ввести 3 числа и программа выдаст 4 число в качестве предсказания.
В данном примере будут использоваться следующие обучающие последовательсноти:
Данные 1| 0 0 1 0
Данные 2| 1 1 1 1
Данные 3| 1 0 1 1
Данные 4| 0 1 1 0
Здесь можно заметить, что четвертое число всегда соотвествует первому. Эту закономерность и должна благодаря коду ниже научиться обнаруживать нейронная сеть, а затем на произвольных данных введенных пользователем выдать правильное предсказание.
Этот пример позволяет создать и запустить простейшую нейронную сеть, но уже в этом примере присутсвуют все необходимые атрибуты создания нейронных сетей: подготовка данных для обучения модели, конфигурация модели, запуск модели.
Дальше мы разберем, что такое функция активации, обратное распространение, внутренние слои нейронной сети и прочие аспекты создания нейронных сетей.
Обратите внимание, что здесь используется библиотека numpy для получения дополнительных команд при работе с массивами. Более подробно данную библиотеку мы рассмотрим позже.
```
'''
Source code for lesson 1.
A minimal single-neuron network built with numpy only.
'''
import numpy as np
# seed the RNG so the weight initialisation is reproducible
np.random.seed(1)
# weights of the single neuron (3 inputs -> 1 output), uniform in [-1, 1)
synaptic_weights = 2 * np.random.random((3, 1)) - 1
# sigmoid activation
def sigmoid(x):
    """Logistic function: map any real x into (0, 1)."""
    return 1 / (1 + np.exp(-x))
# derivative of the sigmoid
def sigm_deriv(x):
    """Sigmoid derivative, with x already equal to sigmoid(z)."""
    return x * (1 - x)
# ReLU activation
def ReLU(x):
    """Rectified linear unit: x for positive inputs, 0 otherwise."""
    return x * (x > 0)
# derivative of ReLU
def relu_deriv(x):
    """ReLU derivative; returns a boolean mask (True where x > 0)."""
    return x>0
# plot both activations and their derivatives side by side
D = 10  # half-width of the plotted x-range
X = np.linspace(0-D,0+D,210)
Y = sigmoid(X)
dY = sigm_deriv(sigmoid(X))
plt.figure(figsize =(16,4))
plt.subplot(1,2,1)
plt.plot(X , Y ,'-g',label = 'сигмоид' )
plt.plot(X , dY ,'-r',label = 'd сигмоид' )
plt.grid('On')
plt.legend()
plt.xlabel('X')
plt.ylabel('Y')
Yr = ReLU(X)
dYr = relu_deriv(ReLU(X))
plt.subplot(1,2,2)
plt.plot(X , Yr ,'-g',label = 'ReLU' )
plt.plot(X , dYr ,'-r',label = 'd ReLU' )
plt.legend()
plt.grid('On')
plt.xlabel('X')
plt.ylabel('Yr')
plt.show()
# train the network: repeatedly forward the data and nudge the weights
def train_nn(training_inputs, training_outputs, training_iterations):
    """Fit the module-level `synaptic_weights` with back-propagation-style updates.

    Parameters:
        training_inputs: ndarray of shape (n_samples, 3)
        training_outputs: ndarray of shape (n_samples, 1)
        training_iterations: number of full-batch update steps
    """
    global synaptic_weights
    for iteration in range(training_iterations):
        # forward pass through the single neuron
        output = run_nn(training_inputs)
        # error term used by the back-propagation update
        error = training_outputs - output
        # weight correction: inputs^T . (error * sigmoid'(output))
        adjustments = np.dot(training_inputs.T, error * sigm_deriv(output))
        synaptic_weights += adjustments
# forward pass: feed inputs through the neuron and return the prediction
# input values are converted to floats first (so string digits also work)
def run_nn(inputs):
    """Return sigmoid(inputs . synaptic_weights) for the given input rows."""
    global synaptic_weights
    inputs = inputs.astype(float)
    output = sigmoid(np.dot(inputs, synaptic_weights))
    return output
# training data: note the 4th value always equals the 1st input bit
training_inputs = np.array([[0,0,1], [1,1,1], [1,0,1], [0,1,1]])
training_outputs = np.array([[0,1,1,0]]).T
# run the training
train_nn(training_inputs, training_outputs, 10000)
print("веса после завершения обучения: ")
print(synaptic_weights)
# ask the user for three binary inputs (kept as strings; run_nn casts to float)
user_inp1 = str(input("Первое число(0 или 1): "))
user_inp2 = str(input("Второе число(0 или 1): "))
user_inp3 = str(input("Третье число(0 или 1): "))
print(f"Проверка на новых данных: {user_inp1} {user_inp2} {user_inp3}")
print("Предсказание нейронной сети: ")
print(run_nn(np.array([user_inp1, user_inp2, user_inp3])))
print(run_nn(np.array([0,0,0])))
```
## Пример построения двухслойной нейронной сети на numpy
```
'''
Исходный код к уроку 1.
Построение двухслойной нейронный сети для классификации цветков ириса
'''
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# sklearn здесь только, чтобы разделить выборку на тренировочную и тестовую
from sklearn.model_selection import train_test_split
### Шаг 1. Определение функций, которые понадобяться для обучения
# Encode integer class labels as one-hot rows.
def to_one_hot(Y):
    """Return an (n, max(Y)+1) matrix with a single 1.0 per row at column Y[i]."""
    n_col = np.amax(Y) + 1
    binarized = np.zeros((len(Y), n_col))
    binarized[np.arange(len(Y)), Y] = 1.0
    return binarized
# decode a one-hot matrix back into integer class labels
def from_one_hot(Y):
    """Invert to_one_hot: return an (n, 1) column of class indices.

    Bug fix: the original stored ``j + 1``, so labels came back shifted by
    one and ``from_one_hot(to_one_hot(y)) != y``. The correct inverse stores
    the column index ``j`` of the set bit.
    """
    arr = np.zeros((len(Y), 1))
    for i in range(len(Y)):
        row = Y[i]
        for j in range(len(row)):
            if row[j] == 1:
                arr[i] = j
    return arr
# Sigmoid activation and its derivative (the derivative expects sigmoid output).
def sigmoid(x):
    """Logistic function mapping any real x into (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_deriv(x):
    """Derivative of the sigmoid given x = sigmoid(z): x * (1 - x)."""
    one_minus = 1 - x
    return x * one_minus
# Axis-wise vector normalisation.
def normalize(X, axis=-1, order=2):
    """Scale X so each vector along `axis` has unit `order`-norm.

    All-zero vectors are left untouched: their norm is replaced by 1
    to avoid division by zero.
    """
    norms = np.atleast_1d(np.linalg.norm(X, order, axis))
    norms[norms == 0] = 1
    return X / np.expand_dims(norms, axis)
### Step 2. Prepare the training data
# read the csv file; adjust the path to Iris.csv if needed
iris_data = pd.read_csv("Iris.csv")
# print(iris_data.head()) # uncomment to inspect the data layout
# pairwise scatter plots of the four features, coloured by species
g = sns.pairplot(iris_data.drop("Id", axis=1), hue="Species")
plt.show() # displays the pairplot
# map the species names to integer class labels 0..2
# NOTE(review): chained `replace(..., inplace=True)` is deprecated in newer pandas
iris_data['Species'].replace(['Iris-setosa', 'Iris-virginica', 'Iris-versicolor'], [0, 1, 2], inplace=True)
# assemble the input features
columns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
x = pd.DataFrame(iris_data, columns=columns)
#x = normalize(x.as_matrix())
x = normalize(x.values)
# assemble the targets as one-hot rows
columns = ['Species']
y = pd.DataFrame(iris_data, columns=columns)
#y = y.as_matrix()
y = y.values
y = y.flatten()
y = to_one_hot(y)
# split into training and test sets (no fixed random_state: split varies per run)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33)
y  # notebook cell output: inspect the one-hot targets
def neural_network(neuron_numb, l_r, epoches):
    """Train a 2-layer sigmoid network (4 -> neuron_numb -> 3) on the iris splits.

    Reads the module-level X_train/y_train/X_test/y_test and prints the
    training and test "accuracy" (100 * (1 - mean absolute error)).

    Parameters:
        neuron_numb: hidden-layer width
        l_r: learning rate
        epoches: number of full-batch training iterations
    """
    w0 = 2*np.random.random((4, neuron_numb)) - 1 # input layer weights: 4 inputs
    w1 = 2*np.random.random((neuron_numb, 3)) - 1 # hidden layer weights: 3 outputs
    errors = []
    for i in range(epoches):
        # feed forward
        layer0 = X_train
        layer1 = sigmoid(np.dot(layer0, w0))
        layer2 = sigmoid(np.dot(layer1, w1))
        # back propagation using gradient descent
        layer2_error = y_train - layer2 # derivative of the quadratic loss
        layer2_delta = layer2_error * sigmoid_deriv(layer2)
        layer1_error = layer2_delta.dot(w1.T)
        layer1_delta = layer1_error * sigmoid_deriv(layer1)
        w1 += layer1.T.dot(layer2_delta) * l_r
        w0 += layer0.T.dot(layer1_delta) * l_r
        # model metric for this epoch
        error = np.mean(np.abs(layer2_error))
        errors.append(error)
        accuracy = (1 - error) * 100
    # plt.figure(figsize = (16,5))
    # plt.plot(errors)
    # plt.xlabel('Обучение')
    # plt.ylabel('Ошибка')
    # plt.show() # расскоментируйте, чтобы посмотреть
    # N = 50
    # plt.figure(figsize = (16,5))
    # plt.plot(layer2[:N,1], 'r',label = 'Y new')
    # plt.plot(y_train[:N,1],'g', label = 'Y train')
    # plt.xlabel('№ примера')
    # plt.ylabel('выход сети и целевой')
    # plt.legend( )
    # plt.show() # расскоментируйте, чтобы посмотреть
    print("Аккуратность нейронной сети " + str(round(accuracy,2)) + "%")
    # return round(accuracy,2)
    # feed forward on the held-out test set
    layer0_t = X_test
    layer1_t = sigmoid(np.dot(layer0_t, w0))
    layer2_t = sigmoid(np.dot(layer1_t, w1))
    layer2_error_t = y_test - layer2_t
    # N = 50
    # plt.figure(figsize = (16,5))
    # plt.plot(layer2_t[:N,1], 'r',label = 'Y new')
    # plt.plot(y_test[:N,1],'g', label = 'Y train')
    # plt.xlabel('№ примера')
    # plt.ylabel('выход сети и целевой')
    # plt.legend( )
    # plt.show() # расскоментируйте, чтобы посмотреть
    # test-set metric
    error_t = np.mean(np.abs(layer2_error_t))
    accuracy_t = (1 - error_t) * 100
    print("Аккуратность нейронной сети на тесте " + str(round(accuracy_t,2)) + "%")
# Hyper-parameter sweep: vary hidden width, learning rate and epoch count
neural_network(neuron_numb=5, l_r=0.1, epoches=3000)
neural_network(neuron_numb=10, l_r=0.1, epoches=3000)
neural_network(neuron_numb=50, l_r=0.1, epoches=3000)
neural_network(neuron_numb=7, l_r=0.4, epoches=3000)
neural_network(neuron_numb=5, l_r=1, epoches=3000)
neural_network(neuron_numb=5, l_r=5, epoches=3000)
neural_network(neuron_numb=5, l_r=1, epoches=300)
neural_network(neuron_numb=5, l_r=1, epoches=30000)
```
### Вывод:
Для улучшения точности нейронной сети нужно выбирать адекватные параметры, для этой модели значения в 5 скрытых нейронов, шаг обучения 1 и самое большое количество эпох (30000) показало лучший результат.
Количество скрытых нейронов должно лежать в промежутке от 3 до 7. При увеличении числа нейронов точность не сильно изменяется, однако, параметров становится на порядки больше, что ведет к увеличению затрат мощности.
Скорость обучения показала хорошие результаты в рамках [0.1; 1]. При увеличении скорости обучения алгоритм не сходится в самой нижней точке на поверхности ошибок, что не позволяет дойти до минимальной ошибки.
С каждой эпохой величина весов усиливается, что дает более точное предсказание. При очень больших количествах эпох сеть может переобучится, выкручивая в максимум и минимум веса, что неоднозначно скажется на сложных задачах.
```
### Step 3. Train the neural network
# hidden-layer width
neuron_numb = 5
# random initial weights
w0 = 2*np.random.random((4, neuron_numb)) - 1 # input layer weights: 4 inputs
w1 = 2*np.random.random((neuron_numb, 3)) - 1 # hidden layer weights: 3 outputs
# learning rate
n = 0.1
# error per epoch, kept to plot the training curve afterwards
errors = []
# training loop (full-batch gradient descent)
for i in range(3000):
    # feed forward
    layer0 = X_train
    layer1 = sigmoid(np.dot(layer0, w0))
    layer2 = sigmoid(np.dot(layer1, w1))
    # back propagation using gradient descent
    layer2_error = y_train - layer2 # derivative of the quadratic loss
    layer2_delta = layer2_error * sigmoid_deriv(layer2)
    layer1_error = layer2_delta.dot(w1.T)
    layer1_delta = layer1_error * sigmoid_deriv(layer1)
    w1 += layer1.T.dot(layer2_delta) * n
    w0 += layer0.T.dot(layer1_delta) * n
    # model metric for this epoch
    error = np.mean(np.abs(layer2_error))
    errors.append(error)
    accuracy = (1 - error) * 100
### Step 4. Show the results
# training-error curve over the epochs
plt.figure(figsize = (16,5))
plt.plot(errors)
plt.xlabel('Обучение')
plt.ylabel('Ошибка')
plt.show() # displays the figure
N = 50
plt.figure(figsize = (16,5))
plt.plot(layer2[:N,1], 'r',label = 'Y new')
plt.plot(y_train[:N,1],'g', label = 'Y train')
plt.xlabel('№ примера')
plt.ylabel('выход сети и целевой')
plt.legend( )
plt.show() # displays the figure
print("Аккуратность нейронной сети " + str(round(accuracy,2)) + "%")
# feed forward on the held-out test set
layer0_t = X_test
layer1_t = sigmoid(np.dot(layer0_t, w0))
layer2_t = sigmoid(np.dot(layer1_t, w1))
layer2_error_t = y_test - layer2_t
N = 50
plt.figure(figsize = (16,5))
plt.plot(layer2_t[:N,1], 'r',label = 'Y new')
plt.plot(y_test[:N,1],'g', label = 'Y train')
plt.xlabel('№ примера')
plt.ylabel('выход сети и целевой')
plt.legend( )
plt.show() # displays the figure
# test-set metric
error_t = np.mean(np.abs(layer2_error_t))
accuracy_t = (1 - error_t) * 100
print("Аккуратность нейронной сети на тесте " + str(round(accuracy_t,2)) + "%")
```
В этом уроке мы с вами рассматривали как сделать простые нейронные сети без использования специальных фреймворков и библиотек для этого. В следующих уроках мы с вами познакомимся как делать нейронные сети с помощью Keras и TensorFlow.
## Домашнее задание
1. Попробуйте видоизменить параметры разобранной на уроке двухслойной нейронной сети таким образом, чтобы улучшить ее точность (число нейронов, число эпох , можно изменять число слоев).
2. Проведите анализ — что приводит к ухудшению точности нейронной сети? Что приводит к увеличению ее точности?
Мы разобрались с основами обучения нейронных сетей и получили некоторое представление об архитектурах простых нейронных сетей. Давайте попробуем закрепить эти знания на практике. Кроме того, на примере, который будет изложен ниже, возможно, прояснятся какие-либо оставшиеся вопросы.
В данном примере мы сделаем нейронную сеть которая будет отличать различные виды ириса между собой. Надо полагать данный датасет вам уже знаком. Логика работы этого кода будет такой же как и в первом разобранном примере, но только все компоненты этого кода будут несколько усложнены.
## Дополнительные материалы
<ol>
<li>https://medium.com/topic/machine-learning</li>
</ol>
## Используемая литература
Для подготовки данного методического пособия были использованы следующие ресурсы:
<ol>
<li>Глубокое обучение — Николенко С. И., Кадурин 2018</li>
<li>Шакла Н. — Машинное обучение и TensorFlow 2019</li>
<li>Asifullah Khan, Anabia Sohail, Umme Zahoora, Aqsa Saeed Qureshi - A Survey of the Recent Architectures of Deep Convolutional Neural Networks 2019</li>
<li>A direct adaptive method for faster backpropagation learning: the RPROP algorithm - Neural Networks, 1993</li>
<li>Википедия</li>
</ol>
| github_jupyter |
```
import numpy as np
import cv2
import tensorflow as tf
# Haar cascade for frontal-face detection; the XML must be in the working directory
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# trained emotion classifier; absolute, machine-specific path — adjust before running
model = tf.keras.models.load_model("/home/d3adsh0t/Tunex/8")
# EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised","neutral"]
# EMOTIONS=["angry",
# "disgust",
# "happy",
# "neutral",
# "sad",
# "surprise"]
# class labels, presumably in the index order of the model's output layer — TODO confirm
EMOTIONS = ["afraid","angry","disgust","happy","neutral","sad","surprised"]
def prepare(ima):
    """Convert a BGR frame into the (1, 48, 48, 1) grayscale tensor the model expects."""
    IMG_SIZE = 48  # model input resolution
    gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
    scaled = gray / 255.0  # normalise pixel intensities to [0, 1]
    resized = cv2.resize(scaled, (IMG_SIZE, IMG_SIZE))  # match the model's expected size
    return resized.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
```
# Static Test
```
# Single-image smoke test of the emotion classifier.
image=cv2.imread("afraid.jpeg")  # NOTE(review): returns None if the file is missing
# faces = face_cascade.detectMultiScale(image, 1.3, 5)
# faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]
# (x,y,w,h)=faces
# roi = image[y-40:y+h+40, x:x+w]
prediction = model.predict([prepare(image)])
preds = prediction[0]  # probabilities for the single image in the batch
label = EMOTIONS[preds.argmax()]  # most likely emotion
print(label)
# image = cv2.rectangle(image,(x,y-40),(x+w,y+h+40),(255,0,0),2)
cv2.imshow("image",image)
cv2.waitKey(0)  # wait for a key press before closing the window
cv2.destroyAllWindows()
```
# Live Test
```
# cap = cv2.VideoCapture("test3.mp4")
cap = cv2.VideoCapture(0)  # default webcam
# Optional recording of the annotated stream; set `result` to a VideoWriter to enable.
# result = cv2.VideoWriter('1testface.avi', cv2.VideoWriter_fourcc(*'MJPG'), 30, (540, 960))
result = None  # fix: the original called result.write() with the writer commented out
while True:
    ret, img = cap.read()
    if not ret:  # camera disconnected / end of stream
        break
    # print(img.shape)
    # img = cv2.resize(img, (540, 960))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    canvas = np.zeros((256, 256, 3), dtype="uint8")  # probability-bar panel
    # Explicit empty check replaces the original bare `except: pass`, which
    # silently hid every error (including the missing-writer NameError).
    if len(faces) > 0:
        # keep only the largest detected face
        x, y, w, h = sorted(faces, reverse=True, key=lambda f: (f[2] - f[0]) * (f[3] - f[1]))[0]
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi = img[y:y + h, x:x + w]
        cv2.imshow('img2', roi)
        preds = model.predict([prepare(roi)])[0]
        label = EMOTIONS[preds.argmax()]
        for i, (emotion, prob) in enumerate(zip(EMOTIONS, preds)):
            text = "{}: {:.2f}%".format(emotion, prob * 100)
            bar_w = int(prob * 300)  # fix: original reused `w`, clobbering the face width
            cv2.rectangle(canvas, (7, (i * 35) + 5), (bar_w, (i * 35) + 35), (0, 0, 255), -1)
            cv2.putText(canvas, text, (10, (i * 35) + 23), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)
        cv2.imshow("Probabilities", canvas)
        cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        if result is not None:
            result.write(img)
    cv2.imshow('img', img)
    # Fix: original called waitKey three times per frame and compared the raw
    # return value; mask with 0xFF and read the key exactly once.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```
# Test on static Validation data
```
# Evaluate the classifier per emotion class on a held-out validation folder.
# Expects images at .../validation/<class_index>/<i>.jpg
for j in range(0,7):
    right_count=0
    wrong_count=0
    for i in range(1,50):
        # try:
        # NOTE(review): cv2.imread returns None for missing files and prepare()
        # will then raise; the commented-out try/except used to hide this.
        img=cv2.imread("/home/arjun/DM/Face/validation/"+str(j)+"/"+str(i)+".jpg")
        # cv2.imshow("image",img)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # faces = face_cascade.detectMultiScale(img, 1.3, 5)
        # print(faces)
        # faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]
        # (x,y,w,h)=faces
        # roi = image[y-20:y+h, x:x+w]
        pr=model.predict([prepare(img)])
        preds=pr[0]
        label = EMOTIONS[preds.argmax()]  # predicted class name
        if(label==EMOTIONS[j]):
            right_count+=1
        else:
            wrong_count+=1
        # except:
        # pass
    # per-class tally
    print(EMOTIONS[j])
    print("Right "+str(right_count)+" Wrong "+str(wrong_count))
```
```
angry
Right 20 Wrong 29
disgust
Right 30 Wrong 19
fear
Right 23 Wrong 26
happy
Right 40 Wrong 9
neutral
Right 26 Wrong 23
sad
Right 32 Wrong 17
surprise
Right 34 Wrong 15
```
| github_jupyter |
# T005 · Compound clustering
Authors:
- Gizem Spriewald, CADD Seminar, 2017, Charité/FU Berlin
- Calvinna Caswara, CADD Seminar, 2018, Charité/FU Berlin
- Jaime Rodríguez-Guerra, 2019-2020, [Volkamer lab](https://volkamerlab.org), Charité
__Talktorial T005__: This talktorial is part of the TeachOpenCADD pipeline described in the [first TeachOpenCADD paper](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x), comprising of talktorials T001-T010.
## Aim of this talktorial
<!-- TODO: The wording of this paragraph is confusing -->
Similar compounds might bind to the same targets and show similar effects.
Based on this similar property principle, compound similarity can be used to build chemical groups via clustering.
From such a clustering, a diverse set of compounds can also be selected from a larger set of screening compounds for further experimental testing.
### Contents in _Theory_
* Introduction to clustering and Jarvis-Patrick algorithm
* Detailed explanation of Butina clustering
* Picking diverse compounds
### Contents in _Practical_
* Clustering with the Butina algorithm
* Visualizing the clusters
* Picking the final list of compounds
* Bonus: analysis of run times
### References
* Butina, D. Unsupervised Data Base Clustering Based on Daylight’s Fingerprint and Tanimoto Similarity: A Fast and Automated Way To Cluster Small and Large Data Set. _J. Chem. Inf. Comput. Sci._ (1999)
* Leach, Andrew R., Gillet, Valerie J. An Introduction to Chemoinformatics (2003)
* [Jarvis-Patrick Clustering](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm)
* [TDT Tutorial](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb)
* [RDKit clustering documentation](http://rdkit.org/docs/Cookbook.html#clustering-molecules)
## Theory
### Introduction to clustering and Jarvis-Patrick algorithm
[Clustering](https://en.wikipedia.org/wiki/Cluster_analysis) can be defined as _the task of grouping a set of objects in such a way that objects in the same group (called a cluster) are more similar (in some sense) to each other than to those in other groups (clusters)_.
Compound clustering in pharmaceutical research is often based on chemical or structural similarity between compounds to find groups that share properties as well as to design a diverse and representative set for further analysis.
General procedure:
* Methods are based on clustering data by similarity between neighboring points.
* In cheminformatics, compounds are often encoded as molecular fingerprints and similarity can be described by the Tanimoto similarity (see **Talktorial T004**).
> Quick reminder:
>
> * Fingerprints are binary vectors where each bit indicates the presence or absence of a particular substructural fragment within a molecule.
> * Similarity (or distance) matrix: The similarity between each pair of molecules represented by binary fingerprints is most frequently quantified using the Tanimoto coefficient, which measures the number of common features (bits).
> * The value of the Tanimoto coefficient ranges from zero (no similarity) to one (high similarity).
There are a number of clustering algorithms available, with the [Jarvis-Patrick clustering](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm) being one of the most widely used algorithms in the pharmaceutical context.
Jarvis-Patrick clustering algorithm is defined by two parameters $K$ and $K_{min}$:
* Calculate the set of $K$ nearest neighbors for each molecule.
* Two molecules cluster together if
* they are in each others list of nearest neighbors
* they have at least $K_{min}$ of their $K$ nearest neighbors in common.
The Jarvis-Patrick clustering algorithm is deterministic and able to deal with large sets of molecules in a matter of a few hours. However, a downside lies in the fact that this method tends to produce large heterogeneous clusters (see _Butina clustering_, referenced above).
More clustering algorithms can also be found in the [scikit-learn clustering module](http://scikit-learn.org/stable/modules/clustering.html).
### Detailed explanation of Butina clustering
Butina clustering ([*J. Chem. Inf. Model.* (1999), **39** (4), 747](https://pubs.acs.org/doi/abs/10.1021/ci9803381)) was developed to identify smaller but homogeneous clusters, with the prerequisite that (at least) the cluster centroid will be more similar than a given threshold to every other molecule in the cluster.
These are the key steps in this clustering approach (see flowchart below):
#### 1. Data preparation and compound encoding
* To identify chemical similarities, the compounds in the input data (e.g. given as SMILES) will be encoded as molecular fingerprints, e.g., RDK5 fingerprint which is a subgraph-based fingerprint similar to the well known [Daylight Fingerprint](http://www.daylight.com/dayhtml/doc/theory/theory.finger.html) (which was used in the original publication).
#### 2. Tanimoto similarity (or distance) matrix
* The similarity between two fingerprints is calculated using the Tanimoto coefficient.
* Matrix with Tanimoto similarities between all possible molecule/fingerprint pairs ($n*n$ similarity matrix with $n$=number of molecules, upper triangle matrix used only).
* Equally, the distances matrix can be calculated ($1 - similarity$).
#### 3. Clustering molecules: Centroids and exclusion spheres
> Note: Molecules will be clustered together, if they have a maximum distance below a specified cut-off from the cluster centroid (if distance matrix is used) or if they have a minimum similarity above the specified cut-off (if similarity matrix is used).
* **Identification of potential cluster centroids**
* The cluster centroid is the molecule within a given cluster which has the largest number of neighbors.
* Annotate neighbors: For each molecule count all molecules with a Tanimoto distance below a given threshold.
* Sort the molecules by their number of neighbors in descending order, so that potential cluster centroids (i.e. the compounds with the largest number of neighbors) are placed at the top of the file.
* **Clustering based on the exclusion spheres**
* Starting with the first molecule (centroid) in the sorted list.
* All molecules with a Tanimoto index above or equal to the cut-off value used for clustering then become members of that cluster (in case of similarity).
* Each molecule that has been identified as a member of the given cluster is flagged and removed from further comparisons. Thus, flagged molecules cannot become either another cluster centroid or a member of another cluster. This process is like putting an exclusion sphere around the newly formed cluster.
* Once the first compound in the list has found all its neighbors, the first available (i.e. not flagged) compound at the top of the list becomes the new cluster centroid.
* The same process is repeated for all other unflagged molecules down the list.
* Molecules that have not been flagged by the end of the clustering process become singletons.
* Note that some molecules assigned as singletons can have neighbors at the given Tanimoto similarity index, but those neighbors have been excluded by a stronger cluster centroid.
```
# Render the Butina clustering flowchart (PDF) inline in the notebook.
from IPython.display import IFrame
IFrame("images/butina_full.pdf", width=800, height=500)
```
*Figure 1:* Theoretical example of the Butina clustering algorithm, drawn by Calvinna Caswara.
### Picking diverse compounds
Finding representative sets of compounds is a concept often used in pharmaceutical industry.
* Let's say, we applied a virtual screening campaign but only have a limited amount of resources to experimentally test a few compounds in a confirmatory assay.
* In order to obtain as much information as possible from this screen, we want to select a diverse set. Thus, we pick one representative of each chemical series in our list of potentially active compounds.
Another scenario would be to select one series to gain information about the structure-activity relationship; i.e., how small structural changes in the molecule affect the _in vitro_ activity.
## Practical
### Clustering with the Butina algorithm
Application is following the example of the [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb).
#### Load data and calculate fingerprints
In this part the data is prepared and fingerprints are calculated.
```
import time
import random
from pathlib import Path
import pandas as pd
import numpy
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit import DataStructs
from rdkit.ML.Cluster import Butina
from rdkit.Chem import Draw
from rdkit.Chem import rdFingerprintGenerator
from teachopencadd.utils import seed_everything
seed_everything()  # fix seed to get deterministic outputs
# NOTE: `_dh` is IPython's directory-history list; this works only inside a notebook
HERE = Path(_dh[-1])
DATA = HERE / "data"
# Load and have a look into data
# Filtered data taken from **Talktorial T002**
compound_df = pd.read_csv(
    HERE / "../T002_compound_adme/data/A2A_compounds_lipinski.csv",
    index_col=0,
)
print("Dataframe shape:", compound_df.shape)
compound_df.head()
# Create molecules from SMILES and store in array
compounds = []
# .itertuples() returns a (index, column1, column2, ...) tuple per row
# we don't need index so we use _ instead
# note how we are slicing the dataframe to only the two columns we need now
for _, chembl_id, smiles in compound_df[["molecule_chembl_id", "smiles"]].itertuples():
    compounds.append((Chem.MolFromSmiles(smiles), chembl_id))
compounds[:5]
# Create fingerprints for all molecules (RDK5: subgraph paths up to length 5)
rdkit_gen = rdFingerprintGenerator.GetRDKitFPGenerator(maxPath=5)
fingerprints = [rdkit_gen.GetFingerprint(mol) for mol, idx in compounds]
# How many compounds/fingerprints do we have?
print("Number of compounds converted:", len(fingerprints))
print("Fingerprint length per compound:", len(fingerprints[0]))
# NBVAL_CHECK_OUTPUT
```
#### Tanimoto similarity and distance matrix
Now that we generated fingerprints, we move on to the next step: The identification of potential cluster centroids. For this, we define functions to calculate the Tanimoto similarity and distance matrix.
```
def tanimoto_distance_matrix(fp_list):
    """Return the lower-triangle Tanimoto *distance* matrix as a flat list.

    The entry order is what RDKit's Butina implementation expects:
    for each index i, the distances to every fingerprint with index < i.
    Self-comparisons are never computed (i starts at 1).
    """
    distances = []
    for i in range(1, len(fp_list)):
        # Tanimoto similarity of fingerprint i against all earlier ones
        similarities = DataStructs.BulkTanimotoSimilarity(fp_list[i], fp_list[:i])
        # convert similarity to distance: d = 1 - s
        distances.extend(1 - s for s in similarities)
    return distances
```
See also [[Rdkit-discuss] BulkTanimotoSimilarity](https://sourceforge.net/p/rdkit/mailman/rdkit-discuss/thread/663770d4-b809-c599-e379-31f57380a1d0%40gmail.com/#msg36335970).
```
# Example: Calculate single similarity of two fingerprints
# NBVAL_CHECK_OUTPUT
sim = DataStructs.TanimotoSimilarity(fingerprints[0], fingerprints[1])
print(f"Tanimoto similarity: {sim:.2f}, distance: {1-sim:.2f}")
# Example: Calculate distance matrix (distance = 1-similarity)
tanimoto_distance_matrix(fingerprints)[0:5]
# Side note: That looked like a list and not a matrix.
# But it is a triangular similarity matrix in the form of a list
n = len(fingerprints)
# Calculate number of elements in triangular matrix via n*(n-1)/2
elem_triangular_matr = (n * (n - 1)) / 2
# sanity check: flat list length equals the triangular-matrix element count
print(
    f"Elements in the triangular matrix ({elem_triangular_matr:.0f}) ==",
    f"tanimoto_distance_matrix(fingerprints) ({len(tanimoto_distance_matrix(fingerprints))})",
)
# NBVAL_CHECK_OUTPUT
```
#### Clustering molecules: Centroids and exclusion spheres
In this part, we cluster the molecules and look at the results.
Define a clustering function.
```
def cluster_fingerprints(fingerprints, cutoff=0.2):
    """Cluster fingerprints with the Butina algorithm.

    Parameters
    ----------
    fingerprints : list
        RDKit fingerprint objects.
    cutoff : float
        Distance threshold for the clustering.

    Returns
    -------
    list of tuple
        Clusters of fingerprint indices, sorted largest first.
    """
    # Condensed Tanimoto distance matrix over all fingerprint pairs.
    distances = tanimoto_distance_matrix(fingerprints)
    # Run Butina clustering on the precomputed distance data.
    raw_clusters = Butina.ClusterData(distances, len(fingerprints), cutoff, isDistData=True)
    # Largest clusters first.
    return sorted(raw_clusters, key=len, reverse=True)
```
Cluster the molecules based on their fingerprint similarity.
```
# Run the clustering procedure for the dataset
clusters = cluster_fingerprints(fingerprints, cutoff=0.3)
# Give a short report about the numbers of clusters and their sizes
num_clust_g1 = sum(1 for c in clusters if len(c) == 1)
num_clust_g5 = sum(1 for c in clusters if len(c) > 5)
num_clust_g25 = sum(1 for c in clusters if len(c) > 25)
num_clust_g100 = sum(1 for c in clusters if len(c) > 100)
print("total # clusters: ", len(clusters))
print("# clusters with only 1 compound: ", num_clust_g1)
print("# clusters with >5 compounds: ", num_clust_g5)
print("# clusters with >25 compounds: ", num_clust_g25)
print("# clusters with >100 compounds: ", num_clust_g100)
# NBVAL_CHECK_OUTPUT
# Plot the size of the clusters
fig, ax = plt.subplots(figsize=(15, 4))
ax.set_xlabel("Cluster index")
ax.set_ylabel("Number of molecules")
ax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters], lw=5);
```
#### How to pick a reasonable cutoff?
Since the clustering result depends on the threshold chosen by the user, we will have a closer look on the choice of a cutoff.
```
for cutoff in numpy.arange(0.0, 1.0, 0.2):
clusters = cluster_fingerprints(fingerprints, cutoff=cutoff)
fig, ax = plt.subplots(figsize=(15, 4))
ax.set_title(f"Threshold: {cutoff:3.1f}")
ax.set_xlabel("Cluster index")
ax.set_ylabel("Number of molecules")
ax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters], lw=5)
display(fig)
```
As you can see, the higher the threshold (distance cutoff), the more molecules are considered as similar and are therefore grouped into fewer clusters.
The lower the threshold, the more small clusters and "singletons" appear.
> The smaller the distance value cut-off, the more similar the compounds are required to be to belong to one cluster.
Looking at the plots above, we decided to choose a distance threshold of `0.2`. There are not many singletons and the cluster sizes don't have an extreme but smooth distribution.
```
cutoff = 0.2
clusters = cluster_fingerprints(fingerprints, cutoff=cutoff)
# Plot the size of the clusters - save plot
fig, ax = plt.subplots(figsize=(15, 4))
ax.set_xlabel("Cluster index")
ax.set_ylabel("# molecules")
ax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters])
ax.set_title(f"Threshold: {cutoff:3.1f}")
fig.savefig(
DATA / f"cluster_dist_cutoff_{cutoff:4.2f}.png",
dpi=300,
bbox_inches="tight",
transparent=True,
)
print(
f"Number of clusters: {len(clusters)} from {len(compounds)} molecules at distance cut-off {cutoff:.2f}"
)
print("Number of molecules in largest cluster:", len(clusters[0]))
print(
f"Similarity between two random points in same cluster: {DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]], fingerprints[clusters[0][1]]):.2f}"
)
print(
f"Similarity between two random points in different cluster: {DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]], fingerprints[clusters[1][0]]):.2f}"
)
```
### Visualizing the clusters
#### 10 examples from largest cluster
Now, let's have a closer look at the first 10 molecular structures of the first/largest clusters.
```
print("Ten molecules from largest cluster:")
# Draw molecules
Draw.MolsToGridImage(
[compounds[i][0] for i in clusters[0][:10]],
legends=[compounds[i][1] for i in clusters[0][:10]],
molsPerRow=5,
)
# Save molecules from largest cluster so other talktorials can use it
sdf_path = str(DATA / "molecule_set_largest_cluster.sdf")
sdf = Chem.SDWriter(sdf_path)
for index in clusters[0]:
mol, label = compounds[index]
# Add label to metadata
mol.SetProp("_Name", label)
sdf.write(mol)
sdf.close()
```
#### 10 examples from second largest cluster
```
print("Ten molecules from second largest cluster:")
# Draw molecules
Draw.MolsToGridImage(
[compounds[i][0] for i in clusters[1][:10]],
legends=[compounds[i][1] for i in clusters[1][:10]],
molsPerRow=5,
)
```
The first ten molecules in the respective clusters look indeed similar to each other and many share a common scaffold (visually detected).
See **Talktorial T006** for more information on how to calculate the maximum common substructure (MCS) of a set of molecules.
#### Examples from first 10 clusters
For comparison, we have a look at the cluster centers of the first 10 clusters.
```
print("Ten molecules from first 10 clusters:")
# Draw molecules
Draw.MolsToGridImage(
[compounds[clusters[i][0]][0] for i in range(10)],
legends=[compounds[clusters[i][0]][1] for i in range(10)],
molsPerRow=5,
)
```
Save cluster centers from first 3 clusters as SVG file.
```
# Generate image
img = Draw.MolsToGridImage(
[compounds[clusters[i][0]][0] for i in range(0, 3)],
legends=[f"Cluster {i}" for i in range(1, 4)],
subImgSize=(200, 200),
useSVG=True,
)
# Patch RAW svg data: convert non-transparent to transparent background and set font size
molsvg = img.data.replace("opacity:1.0", "opacity:0.0").replace("12px", "20px")
# Save altered SVG data to file
with open(DATA / "cluster_representatives.svg", "w") as f:
f.write(molsvg)
```
While some similarity is still visible, the centroids of the different clusters clearly look more dissimilar than the compounds within one cluster.
#### Intra-cluster Tanimoto similarities
We can also have a look at the intra-cluster Tanimoto similarities.
```
def intra_tanimoto(fps_clusters):
    """Pairwise Tanimoto similarities within each cluster of fingerprints.

    Takes a list of fingerprint clusters and returns, per cluster, the list
    of all pairwise similarities (1 - distance) between its members.
    """
    # tanimoto_distance_matrix yields distances; flip each back to a similarity.
    return [
        [1 - dist for dist in tanimoto_distance_matrix(cluster_fps)]
        for cluster_fps in fps_clusters
    ]
# Recompute fingerprints for 10 first clusters
mol_fps_per_cluster = []
for cluster in clusters[:10]:
mol_fps_per_cluster.append([rdkit_gen.GetFingerprint(compounds[i][0]) for i in cluster])
# Compute intra-cluster similarity
intra_sim = intra_tanimoto(mol_fps_per_cluster)
# Violin plot with intra-cluster similarity
fig, ax = plt.subplots(figsize=(10, 5))
indices = list(range(10))
ax.set_xlabel("Cluster index")
ax.set_ylabel("Similarity")
ax.set_xticks(indices)
ax.set_xticklabels(indices)
ax.set_yticks(numpy.arange(0.6, 1.0, 0.1))
ax.set_title("Intra-cluster Tanimoto similarity", fontsize=13)
r = ax.violinplot(intra_sim, indices, showmeans=True, showmedians=True, showextrema=False)
r["cmeans"].set_color("red")
# mean=red, median=blue
```
### Picking the final list of compounds
In the following, we are going to pick a final list of **max. 1000 compounds** as a **diverse** subset.
For this, we take the cluster centroid from each cluster (i.e. the first molecule of each cluster) and then for each cluster (starting with the largest one) we take the 10 molecules (or 50% if less than 10 molecules are left in the cluster) that are most similar to the centroid, until we have selected max. 1000 compounds. Thus, we have representatives of each cluster.
Aim of this compound picking is to ensure the diversity for a smaller set of compounds which are proposed for testing in a confirmatory assay.
> Picking procedure was adapted from [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb).
As described there: the idea behind this approach is to ensure diversity (representatives of each cluster) while getting some SAR (structure-activity relationship) from the results of the confirmatory assay (groups of quite similar molecules from larger clusters retained).
Get cluster centers.
```
# Get the cluster center of each cluster (first molecule in each cluster)
cluster_centers = [compounds[c[0]] for c in clusters]
# How many cluster centers/clusters do we have?
print("Number of cluster centers:", len(cluster_centers))
# NBVAL_CHECK_OUTPUT
```
Sort clusters by size and molecules in each cluster by similarity.
```
# Sort the molecules within a cluster based on their similarity
# to the cluster center and sort the clusters based on their size
sorted_clusters = []
for cluster in clusters:
if len(cluster) <= 1:
continue # Singletons
# else:
# Compute fingerprints for each cluster element
sorted_fingerprints = [rdkit_gen.GetFingerprint(compounds[i][0]) for i in cluster]
# Similarity of all cluster members to the cluster center
similarities = DataStructs.BulkTanimotoSimilarity(
sorted_fingerprints[0], sorted_fingerprints[1:]
)
# Add index of the molecule to its similarity (centroid excluded!)
similarities = list(zip(similarities, cluster[1:]))
# Sort in descending order by similarity
similarities.sort(reverse=True)
# Save cluster size and index of molecules in clusters_sort
sorted_clusters.append((len(similarities), [i for _, i in similarities]))
# Sort in descending order by cluster size
sorted_clusters.sort(reverse=True)
```
Pick a maximum of 1000 compounds.
```
# Count selected molecules, pick cluster centers first
selected_molecules = cluster_centers.copy()
# Take 10 molecules (or a maximum of 50%) of each cluster starting with the largest one
index = 0
pending = 1000 - len(selected_molecules)
while pending > 0 and index < len(sorted_clusters):
# Take indices of sorted clusters
tmp_cluster = sorted_clusters[index][1]
# If the first cluster is > 10 big then take exactly 10 compounds
if sorted_clusters[index][0] > 10:
num_compounds = 10
# If smaller, take half of the molecules
else:
num_compounds = int(0.5 * len(tmp_cluster)) + 1
if num_compounds > pending:
num_compounds = pending
# Write picked molecules and their structures into list of lists called picked_fps
selected_molecules += [compounds[i] for i in tmp_cluster[:num_compounds]]
index += 1
pending = 1000 - len(selected_molecules)
print("# Selected molecules:", len(selected_molecules))
# NBVAL_CHECK_OUTPUT
```
This set of diverse molecules could now be used for experimental testing.
### Bonus: analysis of run times
At the end of the talktorial, we can play with the size of the dataset and see how the Butina clustering run time changes.
```
# Reuse old dataset
sampled_mols = compounds.copy()
```
Note that you can try out larger datasets, but data sizes larger than 10000 data points already start to consume quite some memory and time (that's why we stopped there).
```
# Helper function for time computation
def measure_runtime(sampled_mols):
    """Time fingerprint generation plus Butina clustering for a sample.

    ``sampled_mols`` is a list of (mol, label) pairs; returns the elapsed
    wall-clock time in seconds.
    """
    t0 = time.time()
    fps = [rdkit_gen.GetFingerprint(mol) for mol, _ in sampled_mols]
    # Cluster with the same 0.3 cutoff used throughout the runtime experiment.
    cluster_fingerprints(fps, cutoff=0.3)
    return time.time() - t0
len(sampled_mols)
# NBVAL_CHECK_OUTPUT
sample_sizes = [100, 500, 1000, 2000]
runtimes = []
# Take random samples with replacement
for size in sample_sizes:
time_taken = measure_runtime(random.sample(sampled_mols, size))
print(f"Dataset size {size}, time {time_taken:4.2f} seconds")
runtimes.append(time_taken)
fig, ax = plt.subplots()
ax.set_title("Runtime measurement of Butina Clustering with different dataset sizes")
ax.set_xlabel("# Molecules in data set")
ax.set_ylabel("Runtime in seconds")
ax.plot(sample_sizes, runtimes, "g^");
```
Notice how the runtime is not exactly proportional to the sample size! It grows faster!
## Discussion
We have introduced the Butina algorithm to cluster a compound dataset and discussed how to pick a reasonable clustering threshold. The clustering was rationalized by looking at example compounds from different clusters and by checking intra-cluster similarities. Finally, the clusters were used to pick a diverse subset of compounds.
## Quiz
* Why is clustering of molecules important?
* Which algorithms can you use to cluster a set of molecules and what is the general idea behind the algorithm?
* Do you know other clustering algorithms?
| github_jupyter |
## Getting Started
[`Magma`](https://github.com/phanrahan/magma) is a hardware construction language written in `Python 3`. The central abstraction in `Magma` is a `Circuit`, which is analagous to a verilog module. A circuit is a set of functional units that are wired together.
`Magma` is designed to work with [`Mantle`](https://github.com/phanrahan/mantle), a library of hardware building blocks including logic and arithmetic units, registers, memories, etc.
The [`Loam`](https://github.com/phanrahan/loam) system builds upon the `Magma` `Circuit` abstraction to represent *parts* and *boards*. A board consists of a set of parts that are wired together. `Loam` makes it is easy to setup a board such as the Lattice IceStick.
### Lattice IceStick
In this tutorial, we will be using the Lattice IceStick.
This breakout board contains a ICE40HX FPGA with 1K 4-input LUTs.
The board has several useful peripherals including an FTDI USB interface
with an integrated JTAG interface which is used to program the FPGA
and a USART which is used to communicate with the host.
The board also contains 5 LEDs,
a PMOD interface,
and 2 10-pin headers (J1 and J3).
The 10-pin headers bring out 8 GPIO pins,
as well as power and ground.
This board is inexpensive ($25), can be plugged into the USB port on your laptop,
and, best of all, can be
programmed using an open source software toolchain.

Additional information about the IceStick Board can be found in the
[IceStick Programmers Guide](http://www.latticesemi.com/~/media/LatticeSemi/Documents/UserManuals/EI/icestickusermanual.pdf)
### Blink
As a first example,
let's write a `Magma` program that blinks an LED on the Icestick Board.
First, we import `Magma` as the module `m`.
Next, we import `Counter` from `Mantle`.
Before doing the import we configure mantle to use the ICE40 as the target device.
```
import magma as m
m.set_mantle_target("ice40")
```
The next step is to setup the IceStick board. We import the class `IceStick` from `Loam`.
We then create an instance of an `IceStick`.
This board instance has member variables
that store the configuration of all the parts on the board.
The blink program will use the Clock and the LED D5.
Turning *on* the Clock and the LED D5 sets up the build environment
to use the associated ICE40 GPIO pins.
```
from loam.boards.icestick import IceStick
# Create an instance of an IceStick board
icestick = IceStick()
# Turn on the Clock
# The clock must turned on because we are using a synchronous counter
icestick.Clock.on()
# Turn on the LED D5
icestick.D5.on();
```
Now that the IceStick setup is done,
we create a `main` program that runs on the Lattice ICE40 FPGA.
This main program becomes the top level module.
We create a simple circuit inside `main`.
The circuit has a 22-bit counter wired to D5.
The crystal connected to the ICE40 has a frequency of 12 Mhz.
so the counter will increment at that rate.
Wiring the most-significant bit of the counter to D5
will cause the LED to blink roughly 3 times per second.
`D5` is accessible via `main`.
In a similar way, the output of the counter is accessible via `counter.O`,
and since this an array of bits we can access the MSB using Python's standard list indexing syntax.
```
from mantle import Counter
N = 22
# Define the main Magma Circuit on the FPGA on the IceStick
main = icestick.DefineMain()
# Instance a 22-bit counter
counter = Counter(N)
# Wire bit 21 of the counter's output to D5.
m.wire(counter.O[N-1], main.D5)
# End main
m.EndDefine()
```
We then compile the program to verilog. This step also creates a PCF (physical constraints file).
```
m.compile('build/blink', main)
```
Now we run the open source tools for the Lattice ICE40.
`yosys` synthesizes the input verilog file (`blink.v`)
to produce an output netlist (`blink.blif`).
`arachne-pnr` runs the place and router and generates the bitstream as a text file.
`icepack` creates a binary bitstream file that can be downloaded to the FPGA. `iceprog` uploads the bitstream to the device. Once the device has been programmed, you should see the center, green LED blinking.
```
%%bash
cd build
yosys -q -p 'synth_ice40 -top main -blif blink.blif' blink.v
arachne-pnr -q -d 1k -o blink.txt -p blink.pcf blink.blif
icepack blink.txt blink.bin
iceprog blink.bin
```
You can view the verilog file generated by `Magma`.
```
%cat build/blink.v
```
Notice that the top-level module contains two arguments (ports),
`D5` and `CLKIN`.
`D5` has been configured as an output,
and `CLKIN` as an input.
The mapping from these named arguments to pins is contained in the
PCF (physical constraint file).
```
%cat build/blink.pcf
```
`D5` is connected to pin 95 and `CLKIN` is connected to pin 21.
| github_jupyter |
# Tahoe Healthcare
## How to reduce readmissions to each hospital
- The goal of this case is exploratory data analysis to understand which factors are the biggest indicators of readmissions. This way, instead of rolling out 'Care Tracker' to every patient (which costs `$1,200` per patient), only the groups of patients most at risk of being re-admitted will be enrolled into the Care Tracker program.
- The first section will be some basic exploratory data analysis to understand the makeup of the patient data.
- The second section will look into clustering from both a managerial and statistical perspective.
- The third section will work on fitting different supervised machine learning classification models to predict readmission.
- The fourth section will outline the most important variables to predict readmission, total money saved, as well as recommendations to Tahoe Healthcare.
## Exploratory data analysis
```
import pandas as pd
import numpy as np
import seaborn as sns
from itertools import product
from collections import defaultdict
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.ensemble import RandomForestClassifier
from model_testing import Model_Testing_Regression
from scipy.spatial.distance import euclidean
from sklearn.metrics import r2_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc, confusion_matrix
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from operator import itemgetter
from sklearn.preprocessing import StandardScaler
%pylab inline
tahoe_df = pd.read_csv('Final Project Data_Case.csv')
tahoe_df.tail()
```
- Rename the columns to make them easier to work with
```
tahoe_df['age']=tahoe_df['age']
tahoe_df['severity_score']=tahoe_df['severity score']
tahoe_df['comorbidity_score'] = tahoe_df['comorbidity score']
tahoe_df.drop(['age','severity score','comorbidity score'],axis=1,inplace=True)
tahoe_df.info()
# 4,382 patient records
tahoe_df.describe()
```
- Age varies from 65 to 105
- Slightly more males
- More admits outside of flu season,
- Majority are emergency room admits
- Most admits have fairly high severity score and comordibity score
- About 23% readmit after 30 days
```
tahoe_corr_matrix = tahoe_df.corr()
tahoe_corr_matrix
sns.heatmap(tahoe_corr_matrix);
```
- Interested in correlations with readmit30
- High correlation with comorbidity score and severity score, and to a lesser extent flu season and age
- Next, plot the distributions of these variables
```
sns.distplot(tahoe_df.loc[:,'age']);
sns.distplot(tahoe_df.loc[:,'female']);
sns.distplot(tahoe_df.loc[:,'flu_season']);
sns.distplot(tahoe_df.loc[:,'ed_admit']);
sns.distplot(tahoe_df.loc[:,'severity_score']);
sns.distplot(tahoe_df.loc[:,'comorbidity_score']);
```
## Cluster the patients
- First, managerial clustering
- Cluster on the quartiles of comorbidity score and severity score, plus the flu-season flag. There are four quartiles for each of the first two variables and two levels for the flu-season flag, for a total of 32 clusters.
- Second, statistical clustering
- K-means will be used on all of the variables to determine the optimal clustering strategy
```
tahoe_quartiles = tahoe_df.describe()
severity_score_quartiles = [i for i in tahoe_quartiles['severity_score'].iloc[4:7]]
severity_score_quartiles.append(33) ## to account for last quartile
comorbidity_score_quartiles = [i for i in tahoe_quartiles['comorbidity_score'].iloc[4:7]]
comorbidity_score_quartiles.append(132)## to account for last quartile
flu_season=[.1,.9] # greater than or less than this (i.e. 0 or 1)
tahoe_quartiles.loc[:,('severity_score','comorbidity_score','flu_season')]
tahoe_df.head()
tahoe_df['severity_quantile'] = pd.qcut(tahoe_df['severity_score'], 4, labels=False)
tahoe_df['comorbidity_quantile'] = pd.qcut(tahoe_df['comorbidity_score'], 4, labels=False)
severity_score_quartiles
comorbidity_score_quartiles
def assign_managerial_clusters(input_dataframe):
    """Assign each patient to one of 32 managerial clusters.

    Clusters are the cross product of comorbidity quartile (4) x severity
    quartile (4) x flu-season flag (2), numbered 0-31 in that nesting order,
    i.e. cluster id = comorbidity_quantile * 8 + severity_quantile * 2 + flu_season.

    Parameters
    ----------
    input_dataframe : pd.DataFrame
        Must already contain 'comorbidity_quantile' and 'severity_quantile'
        (integer bins 0-3) and a 0/1 'flu_season' column.

    Returns
    -------
    (final_df, cluster_assignments)
        final_df: copy of the input rows with a 'managerial_cluster' column,
        ordered cluster by cluster.
        cluster_assignments: dict mapping
        (comorbidity_quantile, severity_quantile, flu_season) -> cluster id.
    """
    df = input_dataframe.copy()
    ordered_columns = ['female', 'flu_season', 'ed_admit', 'readmit30', 'age',
                       'severity_score', 'comorbidity_score', 'severity_quantile',
                       'comorbidity_quantile', 'managerial_cluster']
    cluster_frames = []
    cluster_assignments = defaultdict(int)
    cluster_id = 0
    for comorbidity_q in range(4):
        for severity_q in range(4):
            for flu_flag in range(2):
                # .copy() avoids pandas chained-assignment warnings on the slice.
                members = df[(df['comorbidity_quantile'] == comorbidity_q)
                             & (df['severity_quantile'] == severity_q)
                             & (df['flu_season'] == flu_flag)].copy()
                members['managerial_cluster'] = cluster_id
                cluster_frames.append(members)
                cluster_assignments[comorbidity_q, severity_q, flu_flag] = cluster_id
                cluster_id += 1
    # Concatenate once at the end; the original concatenated inside the loop,
    # which is quadratic in the number of clusters.
    final_df = pd.concat(cluster_frames)
    # Preserve the original column ordering, appending any extra input columns.
    extra = [c for c in final_df.columns if c not in ordered_columns]
    final_df = final_df.reindex(columns=ordered_columns + extra)
    return final_df, cluster_assignments
tahoe_mang_cluster_df, custer_assignments = assign_managerial_clusters(tahoe_df)
```
- Next, determine the probability of re-admittance per managerial cluster
```
## total number of readmission per managerial cluster
readmission_per_cluster = tahoe_mang_cluster_df.groupby(['managerial_cluster'])['readmit30'].sum().reset_index()
readmission_per_cluster.head()
# divide by the total number to get the probability of re-admission per cluster
patients_per_cluster = tahoe_mang_cluster_df.groupby(['managerial_cluster'])['readmit30'].count().reset_index()
## probability of readmission per cluster
probability_readmission_per_cluster = (readmission_per_cluster.readmit30/patients_per_cluster.readmit30).reset_index()
#sorted_probability_readmission_per_cluster = probability_readmission_per_cluster.sort_values(ascending=False).reset_index()
probability_readmission_per_cluster['probability_of_readmit'] =probability_readmission_per_cluster['readmit30']
probability_readmission_per_cluster['managerial_cluster']=probability_readmission_per_cluster['index']
probability_readmission_per_cluster['patients_per_cluster']=patients_per_cluster['readmit30']
probability_readmission_per_cluster['readmit_per_cluster'] = readmission_per_cluster['readmit30']
# sort by top readmit clusters
probability_readmission_per_cluster.sort_values('probability_of_readmit',ascending=False,inplace=True)
### add in the probability of readmit with caretracker (lowers it by 40%)
probability_readmission_per_cluster['probability_readmit_caretracker'] = \
probability_readmission_per_cluster.probability_of_readmit*.6
```
- Next, determine the cost of adding Care Tracker per cluster per person in each cluster
```
def find_minimum_cost(dataframe_input, cost_of_readmit=8000, cost_of_caretracker=1200):
    """Choose, per cluster, the cheaper of CareTracker enrollment vs. readmission.

    For each row the CareTracker cost is
    P(readmit | caretracker) * patients * cost_of_readmit + caretracker fee * patients,
    and the no-intervention cost is cost_of_readmit * observed readmits.
    Adds three columns to a copy of the input:
    - 'option': 1 = enroll the cluster in CareTracker, 0 = do not enroll
    - 'min_cost': cost of the chosen option
    - 'alternative_cost': cost recorded for the non-chosen path
      (NOTE(review): when enrolling, this records only the expected readmit
      cost under CareTracker, not the no-intervention cost — confirm intent.)
    """
    result = dataframe_input.copy()
    options = []
    min_costs = []
    alt_costs = []
    for _, row in result.iterrows():
        caretracker_cost = (row['probability_readmit_caretracker']
                            * row['patients_per_cluster'] * cost_of_readmit
                            + cost_of_caretracker * row['patients_per_cluster'])
        readmit_cost = cost_of_readmit * row['readmit_per_cluster']
        if caretracker_cost < readmit_cost:
            # CareTracker is cheaper: enroll this cluster.
            options.append(1)
            min_costs.append(caretracker_cost)
            alt_costs.append(cost_of_readmit * row['probability_readmit_caretracker']
                             * row['patients_per_cluster'])
        else:
            # Doing nothing is cheaper: skip CareTracker for this cluster.
            options.append(0)
            min_costs.append(readmit_cost)
            alt_costs.append(caretracker_cost)
    result['min_cost'] = min_costs
    result['option'] = options
    result['alternative_cost'] = alt_costs
    return result
min_cost_tahoe_prob_df = find_minimum_cost(probability_readmission_per_cluster)
# This is the cost of assigning everyone to care tracker
min_cost_tahoe_prob_df['care_tracker_cost_cluster'] = \
min_cost_tahoe_prob_df['patients_per_cluster']*1200 + \
min_cost_tahoe_prob_df['probability_readmit_caretracker']*min_cost_tahoe_prob_df['patients_per_cluster']*8000
# This is the cost of assigning no one to care tracker
min_cost_tahoe_prob_df['readmit_cost_cluster'] = \
min_cost_tahoe_prob_df['readmit_per_cluster']*8000
# Find the savings per cluster
savings_over_readmit= sum(min_cost_tahoe_prob_df.readmit_cost_cluster -min_cost_tahoe_prob_df.min_cost )
savings_over_care_tracker = sum(min_cost_tahoe_prob_df.care_tracker_cost_cluster-min_cost_tahoe_prob_df.min_cost )
total_cost_caretracker = sum(min_cost_tahoe_prob_df.care_tracker_cost_cluster)
total_cost_readmitt_no_caretracker = sum(min_cost_tahoe_prob_df.readmit_cost_cluster)
managerial_min_cost = sum(min_cost_tahoe_prob_df.min_cost )
print("Tahoe will save {:20,.2f} compared to not assigning anyone to care tracker".format(savings_over_readmit))
print("Tahoe will save {:20,.2f} compared to assigning everyone to care tracker".format(savings_over_care_tracker))
baseline_readmittance = sum(min_cost_tahoe_prob_df.readmit_per_cluster)/sum(min_cost_tahoe_prob_df.patients_per_cluster)
baseline_noreadmittance = 1-baseline_readmittance
print("The total cost of assigning everyone to caretracker is {:20,.2f}".format(total_cost_caretracker))
print("The total cost of assigning noone to caretracker {:20,.2f}".format(total_cost_readmitt_no_caretracker ))
print("The total cost of using maangerial clusters and assigning to caretracker from there is {:20,.2f}".format(managerial_min_cost))
print("The baseline probability of re-admittance is {:.2%}".format(
sum(min_cost_tahoe_prob_df.readmit_per_cluster)/sum(min_cost_tahoe_prob_df.patients_per_cluster)))
print("The baseline of no readmittance is {:.2%}".format(baseline_noreadmittance))
```
- Graph the probability of readmission per cluster
```
mang_cluster_and_prob_readmit = tahoe_mang_cluster_df.groupby('managerial_cluster')['readmit30'].mean().reset_index()
mang_cluster_and_prob_readmit['probability_of_readmission']=mang_cluster_and_prob_readmit['readmit30']
mang_cluster_and_prob_readmit=mang_cluster_and_prob_readmit.sort_values('probability_of_readmission',ascending=False)
plt.figure(figsize=(12,5))
plt.title('Readmission per patient quantile')
sns.barplot(x=mang_cluster_and_prob_readmit.managerial_cluster, y=mang_cluster_and_prob_readmit.probability_of_readmission)
min_cost_tahoe_prob_df.head()
# Readmission cost (at $8,000 per readmit) summed over managerial clusters 0-15.
sum(min_cost_tahoe_prob_df [(min_cost_tahoe_prob_df.managerial_cluster>=0) &
(min_cost_tahoe_prob_df.managerial_cluster<=15)]['readmit_per_cluster'])*8000
# Expected cost per cluster: (# patients) * $8,000 * hand-estimated readmit probability.
# NOTE(review): these probabilities are hard-coded — confirm they match the
# probability_of_readmit table computed above.
cluster_one = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==0]['patients_per_cluster'])*8000*.02
cluster_two = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==1]['patients_per_cluster'])*8000*.1
cluster_three = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==2]['patients_per_cluster'])*8000*.05
cluster_four = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==3]['patients_per_cluster'])*8000*.1
cluster_five = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==4]['patients_per_cluster'])*8000*.08
cluster_six = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==5]['patients_per_cluster'])*8000*.11
cluster_seven = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==6]['patients_per_cluster'])*8000*.11
cluster_nine =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==8]['patients_per_cluster'])*8000*.09
cluster_ten =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==9]['patients_per_cluster'])*8000*.085
cluster_eleven =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==10]['patients_per_cluster'])*8000*.08
cluster_twelve =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==11]['patients_per_cluster'])*8000*.18
cluster_thirteen =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==12]['patients_per_cluster'])*8000*.12
cluster_fourteen =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==13]['patients_per_cluster'])*8000*.18
cluster_eight =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==7]['patients_per_cluster'])*8000*.21
cluster_fifteen =\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==14]['patients_per_cluster'])*8000*.23
# NOTE(review): cluster_sixteen filters managerial_cluster==14, the same cluster
# as cluster_fifteen — this likely should be ==15. It is also omitted from the
# sum printed below — confirm intent.
cluster_sixteen=\
sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==14]['patients_per_cluster'])*8000*.3
# NOTE(review): only cluster_one..cluster_fifteen (15 values) are summed here,
# despite the "first 16 cluster" wording in the message.
print("Expected cost for first 16 cluster {}".format(sum([cluster_one,cluster_two,cluster_three,\
cluster_four,cluster_five,\
cluster_six,\
cluster_seven,\
cluster_eight,\
cluster_nine,cluster_ten,cluster_eleven,cluster_twelve,cluster_thirteen,cluster_fourteen,cluster_fifteen])))
fig = sns.barplot(x=['everyone caretracker','no one caretracker','managerial decision rule'],\
y=[total_cost_caretracker,total_cost_readmitt_no_caretracker,managerial_min_cost])
fig.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.title('Cost Comparison')
plt.ylabel("Dollars")
# How many patients to assign to caretracker
sum(min_cost_tahoe_prob_df[min_cost_tahoe_prob_df.option==1]['patients_per_cluster'])
```
### Following the advice from managerial clustering on which segments to assign to care tracker, we would save `$2,491,200` compared to assigning everyone to care tracker and will save `$426,000` compared to not assigning anyone to care tracker.
- This is assuming our sample of patients represents the entire population of patients
- Managerial is typically easier for businesses to implement compared to an algorithm
- However, some risks inlude the fact the this is a static model that does not evolve over time.
> We should assign segments, 31,29,25,27,30, and 28 to care tracker
- 31: comorbidity_score above 131, severity_score above 32, in flu season
- 29: comorbidity_score above 131, severity_score 19-32, in flu season
- 25: comorbidity_score above 131, severity_score below 8, in flu season
- 27: comorbidity_score above 131, severity_score 8 to 19, in flu season
- 30: comorbidity_score above 131, severity_score above 32, not in flu season
- 28: comorbidity_score above 131, severity_score 19-32, not in flu season
# Next, investigate statistical clustering and machine learning to determine the best option.
- Start with k-means
- To find the ideal number of clusters, run a silhouette score model on different cluster sizes
- Don't cluster on readmit rate, we don't know this when a patient comes in
- Then, investigate a couple supervised classification models
```
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between two numpy arrays.

    Parameters
    ----------
    a, b : np.ndarray
        Arrays of identical shape.

    Returns
    -------
    float
        ||a - b||, the Euclidean distance.
    """
    # np.sqrt/np.sum avoid relying on a bare `sqrt` imported in another cell.
    return float(np.sqrt(np.sum((a - b) ** 2)))
# Cluster only on the first five feature columns: readmit status is unknown
# when a patient arrives, so it must not drive the clustering.
clustering_df = tahoe_df.iloc[:,:5]
clustering_df.head()
def sample_silhouette_score(dataframe_input, max_cluster_size=100):
    """Fit KMeans for k = 2..max_cluster_size-1 and pick k by silhouette score.

    For each candidate k, KMeans is fit on the full ``dataframe_input`` and
    the silhouette score of the resulting labels is recorded.  A plot of
    score vs. k is drawn and the best k is printed.

    Parameters
    ----------
    dataframe_input : pd.DataFrame
        Feature matrix to cluster (no target column).
    max_cluster_size : int, optional
        Exclusive upper bound on the number of clusters to try.

    Returns
    -------
    np.ndarray
        Cluster labels produced by the best-scoring k.
    """
    silhouette_score_l = []
    predicted_labels = defaultdict(list)
    for clusters in range(2, max_cluster_size):
        fit_kmeans = KMeans(clusters).fit(dataframe_input)
        # Keep every labeling so the winner can be returned at the end.
        predicted_labels[clusters] = fit_kmeans.labels_
        silhouette_score_l.append(
            silhouette_score(X=dataframe_input, labels=fit_kmeans.labels_))
        print('Finished iteration {}'.format(clusters))
    number_of_clusters = [i for i in range(2, max_cluster_size)]
    plt.plot(number_of_clusters, silhouette_score_l)
    plt.title('Ideal Number of Clusters')
    plt.ylabel('Silhouette score')
    plt.xlabel('Number of clusters')
    best_k = number_of_clusters[np.argmax(np.array(silhouette_score_l))]
    print('The best number of clusters is {}'.format(best_k))
    return predicted_labels[best_k]
clustered_labels = sample_silhouette_score(clustering_df)
```
##### The ideal number of clusters is two, which means that there is not a lot of difference in the people being clustered. We will skip the clustering here and move onto a machine learning algorithm
- Test random forest
- Logistic regression
- Gradient Boosting
- SVMs
```
# Split the population by 30-day readmission outcome to gauge class balance.
admitted_df =tahoe_df[tahoe_df.readmit30==1]
not_admitted_df = tahoe_df[tahoe_df.readmit30==0]
# Number of readmitted patients (the minority class).
len(admitted_df)
```
### Build a random forest model
```
# not balanced
# Feature matrix: demographics, admission context and clinical scores.
tahoe_X = tahoe_df.loc[:,('female', 'flu_season', 'ed_admit', 'age',\
'severity_score', 'comorbidity_score')]
tahoe_y = tahoe_df.loc[:,'readmit30']
tahoe_X_labels = ['female', 'flu_season', 'ed_admit', 'age',
'severity_score', 'comorbidity_score']
tahoe_y_labels = 'readmit30'
X_train_rf, X_test_rf, y_train_rf, y_test_rf = train_test_split(tahoe_X,tahoe_y,random_state=42)
# Small grid; shallow depths (1-4) guard against overfitting.
rf_params = {'n_estimators':[i for i in range(100,500,50)],'max_depth':[i for i in range(1,5)]}
rf_grid = GridSearchCV( RandomForestClassifier(),rf_params)
rf_grid.fit(X_train_rf,y_train_rf)
# Create RF model
best_rf_model = rf_grid.best_estimator_
# Best random forest
# NOTE(review): GridSearchCV with the default refit=True already refits
# best_estimator_ on the full training set, so this fit looks redundant.
best_rf_model.fit( X_train_rf, y_train_rf)
# compute the metrics on this model
# "RMSE" of the 0/1 predictions: euclidean distance to labels / sqrt(n).
best_rf_rmse =euclidean(y_test_rf,best_rf_model.predict(X_test_rf))/sqrt(len(y_test_rf))
print('Best RF RMSE :{}'.format(best_rf_rmse ))
print()
best_rf_r2 =abs(r2_score(y_test_rf,best_rf_model.predict(X_test_rf)))
print('Best RF R^2 score :{:.2%}'.format(best_rf_r2))
print()
rf_accuracy_score = accuracy_score(y_test_rf,best_rf_model.predict(X_test_rf))
print("Best accuracy score {:.2%}".format(rf_accuracy_score))
# Feature importances sorted descending, with the matching feature names.
feature_weights_rf = best_rf_model.feature_importances_[np.argsort(best_rf_model.feature_importances_)[::-1]]
# print(regression_tahoe_readmit.x_labels)
feature_names_rf = np.array(tahoe_X.columns)[np.argsort(best_rf_model.feature_importances_)[::-1]]
print([('Feature:',i,'Importance:',f)\
for f,i in zip(best_rf_model.feature_importances_[np.argsort(best_rf_model.feature_importances_)[::-1]],\
np.array(tahoe_X.columns)[np.argsort(best_rf_model.feature_importances_)[::-1]])])
# ROC needs P(class=1): column 1 of predict_proba.
fpr_rf,tpr_rf,threshold_rf = roc_curve(y_test_rf, [_[1] for _ in best_rf_model.predict_proba(X_test_rf)])
auc_rf = auc(fpr_rf,tpr_rf)
```
## Next, try logistic classification
```
# Logistic-regression grid search.  Two identical searches are run:
# `logistic_m` on *scaled* features (so coefficient magnitudes are comparable
# for importance ranking) and `log_model` on raw features (the model used for
# prediction downstream).
scaler = StandardScaler()
# NOTE(review): penalty='l1' needs a solver such as 'liblinear' or 'saga';
# newer sklearn versions reject it with the default solver -- confirm version.
logistic_params = {'penalty':['l1','l2'],'C':[i for i in np.linspace(.3,1,100)],\
'max_iter':[i for i in range(5,50,5)]}
log_model = LogisticRegression()
logistic_m = GridSearchCV(LogisticRegression() ,logistic_params )
log_model= GridSearchCV(LogisticRegression() ,logistic_params )
X_train_log, X_test_log, y_train_log, y_test_log = train_test_split(np.array(tahoe_X.values.astype(float)),
np.array(tahoe_y.values.astype(float)),random_state=42)
# scale features to compute variable importance
scaled_x_train = scaler.fit_transform(X_train_log)
# BUG FIX: fit the scaler on the training set only; calling fit_transform on
# the test set leaked test-set statistics into preprocessing.
scaled_x_test = scaler.transform(X_test_log)
logistic_m.fit(scaled_x_train,y_train_log)
log_model.fit(X_train_log,y_train_log )
logistic_m.best_estimator_
# return the best estimator
logistic_model = log_model.best_estimator_
# scaled coefficients
log_m = logistic_m.best_estimator_
# "RMSE" of 0/1 predictions: euclidean distance to labels / sqrt(n).
best_logistic_rmse =euclidean(y_test_log,logistic_model.predict(X_test_log))/sqrt(len(y_test_log))
print('Best logistic RMSE :{}'.format(best_logistic_rmse))
print()
best_logistic_r2 = abs(r2_score(y_test_log,logistic_model.predict(X_test_log)))
print('Best logistic R^2 score :{:.2%}'.format(best_logistic_r2))
print()
accuracy_score_log = accuracy_score(y_test_log,logistic_model.predict(X_test_log))
print("Best logistic accuracy {:.1%}".format(accuracy_score_log))
# BUG FIX: rank the feature *names* by |coef| too, so each printed name lines
# up with its |coef| weight (the original sorted names by signed coefficient).
feature_weights_logistic = abs(logistic_model.coef_)[0][np.argsort(abs(log_m.coef_))[0][::-1]]
feature_names_logistic = np.array(tahoe_X.columns)[np.argsort(abs(log_m.coef_))[0][::-1]]
print([('Feature:',i,'Importance:',f)\
for f,i in zip(feature_weights_logistic ,\
feature_names_logistic)])
# ROC from P(class=1): column 1 of predict_proba.
fpr_log,tpr_log,threshold = roc_curve(y_test_log, [_[1] for _ in logistic_model.predict_proba(X_test_log)])
#area under the curve for the ROC curve
auc_log = auc(fpr_log,tpr_log)
```
# Try gradient boosting as well
```
# Grid over tree depth, ensemble size and loss function.
# NOTE(review): loss='deviance' was renamed 'log_loss' in sklearn >= 1.1 --
# confirm against the pinned sklearn version.
g_boost_params = {'max_depth':[i for i in range(1,5)],'n_estimators':[i for i in range(50,500,50)],\
'loss':['deviance','exponential']}
X_train_gb, X_test_gb, y_train_gb, y_test_gb = train_test_split(tahoe_X,tahoe_y,random_state=42)
grid_gb = GridSearchCV(GradientBoostingClassifier(),g_boost_params)
grid_gb.fit(X_train_gb,y_train_gb)
grid_gb.best_estimator_
GBoostModel = grid_gb.best_estimator_
# "RMSE" of 0/1 predictions: euclidean distance to labels / sqrt(n).
best_gb_rmse =euclidean(y_test_gb,GBoostModel.predict(X_test_gb))/sqrt(len(y_test_gb))
print('Best gb RMSE :{}'.format(best_gb_rmse))
print()
best_gb_r2 = abs(r2_score(y_test_gb,GBoostModel.predict(X_test_gb)))
print('Best gb R^2 score :{:.2%}'.format(best_gb_r2))
print()
accuracy_score_gb = accuracy_score(y_test_gb,GBoostModel.predict(X_test_gb))
print("Best gb accuracy {:.1%}".format(accuracy_score_gb))
# Importances and names, both sorted descending by importance.
feature_weights_gb = GBoostModel.feature_importances_[np.argsort(GBoostModel.feature_importances_)[::-1]]
feature_names_gb = np.array(tahoe_X.columns)[np.argsort(GBoostModel.feature_importances_)[::-1]]
print([('Feature:',i,'Importance:',f)\
for f,i in zip(feature_weights_gb ,\
feature_names_gb)])
GBoostModel.predict_proba(X_test_gb)
GBoostModel.classes_
# ROC from P(class=1).
fpr_gb,tpr_gb,threshold = roc_curve(np.array(y_test_gb), [_[1] for _ in GBoostModel.predict_proba(X_test_gb)])
auc_gb= auc(fpr_gb,tpr_gb)
```
# Finally, use support vector machines to predict readmission
```
# Support-vector classifier; probability=True enables predict_proba for ROC.
svm_model = SVC(probability=True)
params_svm ={'C':[i for i in np.linspace(.0000001,2,10)],'gamma':[i for i in np.linspace(.0001,2,10)]}
best_svm_model = GridSearchCV(svm_model,params_svm)
X_train_svm, X_test_svm, y_train_svm, y_test_svm = train_test_split(tahoe_X,tahoe_y,random_state=42)
svm_m = best_svm_model.fit(X_train_svm,y_train_svm)
svm_model = svm_m.best_estimator_
svm_m.best_estimator_
#compute SVM metrics
# "RMSE" of 0/1 predictions: euclidean distance to labels / sqrt(n).
best_svm_rmse =euclidean(y_test_svm,svm_model.predict(X_test_svm))/sqrt(len(y_test_svm))
print('Best svm RMSE :{}'.format(best_svm_rmse))
print()
best_svm_r2 = abs(r2_score(y_test_svm,svm_model.predict(X_test_svm)))
print('Best svm R^2 score :{:.2%}'.format(best_svm_r2))
print()
accuracy_score_svm = accuracy_score(y_test_svm,svm_model.predict(X_test_svm))
print("Best svm accuracy {:.1%}".format(accuracy_score_svm))
# BUG FIX: this cell previously scored X_test_gb here (copy-paste from the
# gradient-boosting cell); use the SVM's own test split.
best_svm_model.predict_proba(X_test_svm)
fpr_svm,tpr_svm,threshold_svm = roc_curve(y_test_svm,[_[1] for _ in best_svm_model.predict_proba(X_test_svm)],pos_label=1)
auc_svm = auc(fpr_svm,tpr_svm)
print("The area under the curve for logistic {}, random forest {}, gradient boosting {}, svm {}".format(
auc_log,auc_rf,auc_gb,auc_svm))
```
## Compare each model using a ROC curve
```
# baseline for ROC curve: the diagonal y = x is a no-skill classifier.
baseline_x = [i for i in np.linspace(0, 1, 100)]
baseline_y = [i for i in np.linspace(0, 1, 100)]
plt.figure(figsize=(10, 5))
plt.plot(fpr_log, tpr_log, label='LOG', color='yellow')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_gb, tpr_gb, label='GB')
plt.plot(fpr_svm, tpr_svm, label='SVM')
plt.plot(baseline_x, baseline_y, label='BASELINE', linestyle='dashed')
# TYPO FIX: title previously read "ROC curve foreadmissions".
plt.title("ROC curve for readmissions")
plt.ylabel("True Positive Rate")
# TYPO FIX: label previously read "False Postiive Rate".
plt.xlabel("False Positive Rate")
plt.legend()
```
- Based upon this, use the Logistic Classification model
- Run each row through the model, and generate a probability of readmittance
- From this probability, determine where the threshold is to minimize cost
```
## only 465 positive cases from the default logistic regression (however, this does not distinguish between false
## positive and true positive)
# Count of patients the default (.5 threshold) model would enroll.
sum(logistic_model.predict(tahoe_X))
# almost one thousand positive in reality
sum(tahoe_y)
```
### Create a confusion matrix to understand the tp, fp, tn, fn for logistic regression
- Compare the default threshold of .50 to the optimal threshold of .34 to see where patients are placed
```
# confusion_matrix layout: rows = actual, columns = predicted.
## 00 is true negatives
## false negative 10
# true positive 1,1
# false positive 0,1
log_confusion_matrix = confusion_matrix(tahoe_y , logistic_model.predict(tahoe_X))
# NOTE(review): ad-hoc cost scratchpad; 310/688 look like matrix cell counts
# multiplied by readmit ($8,000) and caretracker ($1,200) costs -- verify.
310*8000+310*1200+688*8000
log_confusion_matrix
sns.heatmap(log_confusion_matrix);
plt.xlabel("predicted")
plt.ylabel("actual")
```
- Lot of True Negatives (0,0)
- Also, a lot of false negatives (cases where the person was readmitted but we predicted that they were not going to be readmitted). This means we are conservative in guessing readmittance.
- Adjust threshold to reduce the false negatives
```
#logsitic_predictions_df['prob_readmit']=prob_readmit
#logsitic_predictions_df['prob_noreadmit']=prob_noreadmit
#prob_readmit_per_managerial_cluster = min_cost_tahoe_prob_df.loc[:,('managerial_cluster',"probability_of_readmit")]
#prob_readmit_per_managerial_cluster.head()
```
## Next, combine this probability or readmit from logistic, with the probability of readmission per cluster from the managerial clusters
```
# Join the per-cluster readmission probabilities onto the patient-level frame.
combo_df = tahoe_mang_cluster_df.join(prob_readmit_per_managerial_cluster,on='managerial_cluster',lsuffix='m_clust')
combo_df.drop('managerial_clusterm_clust',axis=1,inplace=True)
# predict_proba returns (P(no readmit), P(readmit)) per row.
logistic_model.predict_proba(combo_df.loc[:,('female','flu_season','ed_admit',
'age','severity_score','comorbidity_score')])
prob_noreadmit, prob_readmit = zip(*logistic_model.predict_proba(combo_df.loc[:,('female','flu_season','ed_admit',
'age','severity_score','comorbidity_score')]))
combo_df['prob_readmit_logistic']=prob_readmit
combo_df['prob_noreadmit_logistic']=prob_noreadmit
# Hard 0/1 prediction at the default .5 threshold.
combo_df['logistic_prediction']=logistic_model.predict(combo_df.loc[:,('female','flu_season','ed_admit',
'age','severity_score','comorbidity_score')])
combo_df.tail()
```
### Current threshold is at .50, find the number that maximizes the number of true positives and true negatives (and minimizes cost)
- This will help compensate for the uneven number of people in each case
```
def find_threshold(dataframe_i, caretracker_cost=1200, readmit_cost=8000):
    """Search for the readmission-probability threshold that minimizes cost.

    For each candidate threshold, every patient whose predicted readmission
    probability exceeds it is enrolled in CareTracker.  Cost model:
    a true positive costs ``caretracker_cost + .6 * readmit_cost`` (CareTracker
    lowers the readmission chance by 40%), a false positive costs
    ``caretracker_cost``, a false negative costs the full ``readmit_cost`` and
    a true negative is free.

    Parameters
    ----------
    dataframe_i : pd.DataFrame
        Must contain 'prob_readmit_logistic', 'readmit30' and
        'logistic_prediction' columns.
    caretracker_cost : int, optional
        Per-patient cost of CareTracker enrollment.
    readmit_cost : int, optional
        Cost of a single readmission.

    Returns
    -------
    tuple(list, list)
        ``(cost_per_threshold, default_cost_per_threshold)``: the first is a
        list of ``(threshold, total_cost)`` pairs; the second is a one-element
        list holding ``(threshold_grid, [default_cost, ...])`` used downstream
        to plot the default-threshold (.5) cost as a flat reference line.
    """

    def _total_cost(pred_col):
        """Total expected cost when `pred_col` holds the 0/1 enrollment decision."""
        cost = 0
        for _, row in dataframe_i.iterrows():
            if row[pred_col] == 1 and row['readmit30'] == 1:
                # True positive: CareTracker fee + reduced readmission cost.
                cost += caretracker_cost + .6 * readmit_cost
            elif row[pred_col] == 1 and row['readmit30'] == 0:
                # False positive: CareTracker fee paid for nothing.
                cost += caretracker_cost
            elif row[pred_col] == 0 and row['readmit30'] == 1:
                # False negative: full readmission cost.
                cost += readmit_cost
            # True negative adds no cost.
        return cost

    dataframe_i = dataframe_i.copy()
    cost_per_threshold = []
    for threshold in np.linspace(.01, .99, 60):
        print(threshold, 'current threshold')
        # Enroll anyone whose logistic probability clears this threshold.
        dataframe_i['predicted_outcome'] = dataframe_i.prob_readmit_logistic > threshold
        cost_per_threshold.append((threshold, _total_cost('predicted_outcome')))
    # Cost of the out-of-the-box logistic model (default .5 threshold),
    # replicated so it can be drawn as a horizontal reference line.
    expected_default_cost = _total_cost('logistic_prediction')
    default_cost_per_threshold = [([i for i in np.linspace(.01, .99, 80)],
                                   [expected_default_cost for _ in range(len(dataframe_i))])]
    return cost_per_threshold, default_cost_per_threshold
combo_df.head()
# Local imports so this cell does not depend on earlier-cell imports.
from operator import itemgetter
import matplotlib.ticker
# Sweep thresholds and get the default-threshold (.5) reference cost.
best_thresholds, default_threshold = find_threshold(combo_df)
plt.plot(*zip(*best_thresholds))
# Flat reference line at the default-logistic cost (80 points match the x grid).
f = plt.plot(default_threshold[0][0],[default_threshold[0][1][0] for _ in range(80)])
plt.ylabel('Total cost MM')
plt.xlabel('Prob. of readmission threshold')
plt.title("Optimal Threshold vs. Default Threshold - Logistic Classification");
# BUG FIX: plt.plot returns a list of Line2D (no get_yaxis()); format the
# y axis of the current Axes instead.
plt.gca().get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
min_threshold, min_cost_optimal_threshold = min(best_thresholds, key=itemgetter(1))
min_threshold
min_cost_optimal_threshold
log_confusion_matrix
combo_df.head()
# Re-predict with the cost-optimal threshold (.34) and compare matrices.
# (BUG FIX: the original displayed top_threshold_matrix before this
# assignment, which raises NameError on a fresh run.)
combo_df['new_threshold(.34)_prediction']=combo_df['prob_readmit_logistic']>.34
top_threshold_matrix = confusion_matrix(tahoe_y, combo_df['new_threshold(.34)_prediction'])
top_threshold_matrix
log_confusion_matrix
sns.heatmap(top_threshold_matrix);
plt.xlabel("predicted")
plt.ylabel("actual")
plt.title('Confusion Matrix with .34 threshold')
# BUG FIX: annot_kws expects a dict of text options; annot=True is what
# actually writes the counts into the heatmap cells.
sns.heatmap(log_confusion_matrix,annot=True);
plt.xlabel("predicted")
plt.ylabel("actual")
plt.title('Confusion matrix with default(.50) threshold')
```
#### The number of true positives decreased while the number of false negatives increased. Let us explore why
- Cost of true positive is caretracker_cost (`$1,200`) + $.6*$readmit_cost(`$8,000`)
- Cost of false negative is readmit_cost(`$8,000`)
- Therefore, only want to assign people to caretracker if you are VERY sure they will be readmitted. We are willing to accept more false negatives since they are generally less expensive.
- Find where the cross over threshold is
```
# NOTE(review): crossover scratch calc -- the cost model elsewhere uses
# .6*readmit_cost for a true positive; confirm whether .05 is intentional.
1200+.05*8000
combo_df.head()
# Predictions at a much stricter .69 threshold (fewer enrollments).
combo_df['new_threshold(.69)_prediction']=combo_df['prob_readmit_logistic']>.69
# Flat default-threshold cost computed by find_threshold.
default_cost_logistic = default_threshold[0][1][0]
default_cost_logistic
print("The total ACTUAL cost of assigning everyone to caretracker is {:20,.2f}".format(total_cost_caretracker))
print("The total ACTUAL cost of assigning noone to caretracker {:20,.2f}".format(total_cost_readmitt_no_caretracker ))
print("The total EXPECTED cost of using managerial clusters and assigning to caretracker from there is {:20,.2f}".format(managerial_min_cost))
print("The EXPECTED cost of using logistic with the default threshold {:20,.2f}".format(default_cost_logistic))
print("The EXPECTED cost of using logistic with the optimal (.34) threshold {:20,.2f}".format(min_cost_optimal_threshold ))
print("The savings of using the optimal logistic model is {:20,.2f}".format(total_cost_readmitt_no_caretracker-min_cost_optimal_threshold))
```
- Plot all of the costs against each other
```
plt.figure(figsize=(10,5))
# Total cost under each of the five policies.
# NOTE(review): ci='.9' is not a valid seaborn `ci` value (expects a number,
# 'sd', or None) -- confirm intent.
fig = sns.barplot(x=['everyone caretracker','no one caretracker','managerial decision rule',\
'default logistic','optimal logistic'],\
y=[total_cost_caretracker,total_cost_readmitt_no_caretracker,managerial_min_cost,
default_cost_logistic,min_cost_optimal_threshold ],ci='.9')
# Thousands separators on the dollar axis (needs `import matplotlib`, not
# just pyplot -- presumably done in an earlier cell).
fig.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.title('Cost Comparison')
plt.ylabel("Dollars")
# Error range for optimal logistic given that accuracy is only 80%
"{:20,.2f} -{:20,.2f} ".format(min_cost_optimal_threshold*.2+min_cost_optimal_threshold, \
min_cost_optimal_threshold-min_cost_optimal_threshold*.2,)
```
### Finally, compare the number of people enrolled in caretracker across policies
```
plt.figure(figsize=(10,5))
# Hard-coded enrollment counts taken from the analyses above.
# NOTE(review): 4382/850/465/937 -- verify these still match the current run.
sns.barplot(x=['everyone caretracker','managerial decision rule',\
'default logistic','optimal logistic'],
y=[4382,850,465,937])
plt.title('Caretracker Enrollment across Policies')
plt.ylabel('Number of patients')
```
### Recommendation
- Even though the 'optimal' threshold for logistic regression will save 8k over the default logistic regression, it is too sensitive to cluster probabilities
- Given that we have a very small sample size, these probabilities are bound to change
- The recommendation is to use the default logistic regression, which assigns 310 people to caretracker (vs. 210 for the optimal logistic)
- Still have savings of $.48 MM
| github_jupyter |
# **The Data Science Method**
1. [**Problem Identification**](https://medium.com/@aiden.dataminer/the-data-science-method-problem-identification-6ffcda1e5152)
2. [Data Wrangling](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-data-collection-organization-and-definitions-d19b6ff141c4)
* Data Organization - Using cookiecutter template with some modifications to start.
* Data Collection - Collected data from wikipedia and yahoo finance price dataset. The wikipedia showed us the current S&P 500 companies and then used their ticker symbols to query yahoo finance adj. close prices.
- Load the S&P 500 tickers from wikipedia page
- Get S&P 500 Index (^GSPC) as a Bench Mark
- Use S&P Symbols to Get Adj Close from Yahoo Finance
* Data Cleaning - The S&P 500 data from yahoo finance price is almost clean and ready for analysis use. Need to remove tickers that IPO or die mid year, creating usable Nan values.
* Basic Data Visualizations
* Data Definition
3. [Exploratory Data Analysis](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-exploratory-data-analysis-bc84d4d8d3f9)
* Build data profile tables and plots
- Cumulative Return
- Annualized Return
- Daily Return
- Mean Daily Return
- Standard Deviation Daily Return
- Simple Moving Average
- Exponential Moving Average
- Moving Average Convergence Divergence
- Adj. Close & Daily Return Covariance
- Adj. Close & Daily Return Correlation
- Sharpe Ratio
- Skew
- Kurtosis
* Explore data relationships
* Identification and creation of features
4. [Pre-processing and Training Data Development](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-pre-processing-and-training-data-development-fd2d75182967)
* Create dummy or indicator features for categorical variables
* Standardize the magnitude of numeric features
* Split into testing and training datasets
* Apply scalar to the testing set
5. [Modeling](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-modeling-56b4233cad1b)
* Create dummy or indicator features for categorical variable
* Fit Models with Training Data Set
* Review Model Outcomes — Iterate over additional models as needed.
* Identify the Final Model
6. [Documentation](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-documentation-c92c28bd45e6)
* Review the Results
* Present and share your findings - storytelling
* Finalize Code
* Finalize Documentation
# Problem Identification
The world of financial asset management is difficult to understand. It takes time and money to research and analyze assets. Some of the most interesting and high profile assets are assets in the form of securities. In the United States, financial securities are simply tradable assets such as debts, equities, and derivatives. Nowadays, the financial asset that most U.S. citizens think of is equities and more specifically common stocks. They have begun to believe it is the best indicator of economic growth.
Sure you can buckle down and evaluate stock after stock, company after company, CEO after CEO, quarter after quarter. But is there an easier way for common folks without the financial know-how, or even just the time? ETFs, Exchange-Traded Funds are portfolios that are traded on stock exchanges just like any other stock. ETF portfolios can contain assets such as stocks, bonds, currencies, and/or commodities. This makes it much easier for an everyday investor to invest with lower risk and little to no supervision.
The most common ETFs follow the S&P 500 index (^GSPC) like The Vanguard Group (VOO), iShares (IVV), and State Street Corporation (SPY). The S&P is a capitalization-weighted index, which means it indexes the equities by allocating with capitalization, the stock’s share price multiplied by the number of outstanding shares.
| github_jupyter |
# SageMaker Tensorflow를 이용한 MNIST 학습
MNIST는 필기 숫자 분류하는 문제로 이미지 처리의 테스트용으로 널리 사용되는 데이터 세트입니다. 28x28 픽셀 그레이스케일로 70,000개의 손으로 쓴 숫자 이미지가 레이블과 함께 구성됩니다. 데이터 세트는 60,000개의 훈련 이미지와 10,000개의 테스트 이미지로 분할됩니다. 0~9까지 10개의 클래스가 있습니다. 이 튜토리얼은 SageMaker에서 Tensorflow V2를 이용하여 MNIST 분류 모델을 훈련하는 방법을 보여줍니다.
```
import sagemaker
sagemaker.__version__
import os
import json
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker import get_execution_role
# Session + execution role give the SDK access to S3 and training resources.
sess = sagemaker.Session()
role = get_execution_role()
# S3 prefix where training artifacts (model, outputs) will be written.
output_path='s3://' + sess.default_bucket() + '/tensorflow/mnist'
```
## TensorFlow Estimator
Tensorflow 클래스를 사용하면 SageMaker의 컨테이너 환경에서 학습 스크립트를 실행할 수 있습니다.
다음 파라미터 설정을 통해 환경을 셋업합니다.
- entry_point: 트레이닝 컨테이너에서 신경망 학습을 위해 사용하는 사용자 정의 파이썬 파일. 다음 섹션에서 다시 논의됩니다.
- role: AWS 자원에 접근하기 위한 IAM 역할(role)
- instance_type: 스크립트를 실행하는 SAGEMAKER 인스턴스 유형. 본 노트북을 실행하기 위해 사용중인 SageMaker 인스턴스에서 훈련 작업을 실행하려면`local`로 설정하십시오.
- model_dir: 학습중에 체크 포인트 데이터와 모델을 내보내는 S3 Bucket URI. (default : None). 이 매개변수가 스크립트에 전달되는 것을 막으려면 `model_dir`=False 로 설정할 수 있습니다.
- instance count: 학습작업이 실행될 인스턴스의 갯수. 분산 학습을 위해서는 1 이상의 값이 필요합니다.
- output_path: 학습의 결과물 (모델 아티팩트와 out 파일)을 내보내는 S3 Bucket URI.
- framework_version: 사용하는 프레임워크의 버전
- py_version: 파이썬 버전
보다 자세한 내용은 [the API reference](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html#sagemaker.estimator.EstimatorBase)를 참조합니다.
## 학습을 위한 entry point 스트립트 작성
`entrypoint`를 통해 Tensorflow 모델을 학습하기 위한 Python 코드를 Enstimator (Tensroflow 클래스)에 제공합니다.
SageMaker Tensorflow Estimator는 AWS의 관리환경으로 Tensorflow 실행환경이 저장된 도커 이미지를 가져올 것입니다. Estimator 클래스를 초기화할 때 사용한 파라미터 설정에 따라 스크립트를 실행합니다.
실행되는 훈련 스크립트는 Amazon SageMaker 외부에서 실행될 수있는 훈련 스크립트와 매우 유사하지만 교육 이미지에서 제공하는 환경 변수에 액세스 하는 설정 등이 추가될 수 있습니다. 사용가능한 환경변수의 리스트를 확인하려면 다음 리소스 [the short list of environment variables provided by the SageMaker service](https://sagemaker.readthedocs.io/en/stable/frameworks/mxnet/using_mxnet.html?highlight=entry%20point)를 참고하십시오. 환경변수의 풀셋은 다음 링크 [the complete list of environment variables](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md)에서 확인할 수 있습니다.
본 예제에서는 `code/train.py` 스크립트를 사용합니다.
```
!pygmentize 'code/train.py'
```
### 하이퍼파리미터 설정
추가로, Tensorflow Estimator는 명령라인 매개변수로 학습작업에서 사용할 하이퍼파라미터를 전달합니다.
<span style="color:red"> Note: SageMaker Studio 에서는 local mode가 지원되지 않습니다. </span>
```
# set local_mode if you want to run the training script on the machine that runs this notebook
instance_type='ml.c4.xlarge'
# TensorFlow estimator: runs code/train.py inside the managed TF 2.3.1 container.
est = TensorFlow(
entry_point='train.py',
source_dir='code', # directory of your training script
role=role,
framework_version='2.3.1',
model_dir=False, # don't pass --model_dir to your training script
py_version='py37',
instance_type=instance_type,
instance_count=1,
output_path=output_path,
# Passed to train.py as command-line flags (e.g. --batch-size 512).
hyperparameters={
'batch-size':512,
'epochs':10,
'learning-rate': 1e-3,
'beta_1' : 0.9,
'beta_2' : 0.999
}
)
```
학습 컨테이너는 아래와 같은 방식으로 하이퍼파라미터를 전달하고 스크립트를 실행할것입니다.
```
python train.py --batch-size 32 --epochs 10 --learning-rate 0.001
--beta_1 0.9 --beta_2 0.999
```
## 학습 & 테스트 데이터 채널 지정
Tensorflow Estimator에게 학습 및 테스트 데이터셋을 찾을 수있는 위치를 알려야합니다. S3 버킷에 대한 링크 또는 로컬 모드를 사용하는 경우 로컬 파일 시스템의 경로가 될 수 있습니다. 이 예에서는 공용 S3 버킷에서 MNIST 데이터를 다운로드하고 기본 버킷에 업로드합니다.
```
import logging
import boto3
from botocore.exceptions import ClientError
# Download training and testing data from a public S3 bucket
def download_from_s3(data_dir='/tmp/data', train=True):
    """Download the gzipped MNIST idx files from the public S3 bucket.

    Args:
        data_dir (str): local directory the files are saved into
        train (bool): download the training split if True, else the test split

    Returns:
        None
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(data_dir, exist_ok=True)
    if train:
        images_file = "train-images-idx3-ubyte.gz"
        labels_file = "train-labels-idx1-ubyte.gz"
    else:
        images_file = "t10k-images-idx3-ubyte.gz"
        labels_file = "t10k-labels-idx1-ubyte.gz"
    # The public bucket name ships with the example in code/config.json.
    with open('code/config.json', 'r') as f:
        config = json.load(f)
    # Download each object only if it is not already cached locally.
    s3 = boto3.client('s3')
    bucket = config['public_bucket']
    for obj in [images_file, labels_file]:
        key = os.path.join("datasets/image/MNIST", obj)
        dest = os.path.join(data_dir, obj)
        if not os.path.exists(dest):
            s3.download_file(bucket, key, dest)
    return
# Fetch both MNIST splits into /tmp/data.
download_from_s3('/tmp/data', True)
download_from_s3('/tmp/data', False)
# upload to the default bucket
prefix = 'mnist'
bucket = sess.default_bucket()
loc = sess.upload_data(path='/tmp/data', bucket=bucket, key_prefix=prefix)
# The same S3 location serves both channels; inside the container they become
# SM_CHANNEL_TRAINING and SM_CHANNEL_TESTING.
channels = {
"training": loc,
"testing": loc
}
```
학습 실행시 `channels` 딕셔너리는 컨테이너 내에 `SM_CHANNEL_<key name>` 형태의 환경 변수를 만듭니다.
본 사례에서는 `SM_CHANNEL_TRAINING`과 `SM_CHANNEL_TESTING` 이라는 이름으로 생성될 것입니다. `code/train.py` 에서 해당 값을 어떻게 참조하는지 살펴보십시오. 보다 자세한 내용은 [SM_CHANNEL_{channel_name}](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md#sm_channel_channel_name)를 참조합니다.
필요시 다음과 같이 검증 채널을 추가할 수 있습니다.
```
channels = {
'training': train_data_loc,
'validation': val_data_loc,
'test': test_data_loc
}
```
위 코드에 의해서는 다음 채널이 스크립트에서 사용가능하게 될 것입니다.
`SM_CHANNEL_VALIDATION`.
## SageMaker 학습작업 실행
이제 훈련 컨테이너에는 교육용 스크립트를 실행할 수 있습니다. fit 명령을 호출하여 컨테이너를 시작할 수 있습니다
```
# Launch the SageMaker training job; blocks until the job completes.
est.fit(inputs=channels)
```
## 저장된 모델 데이터 확인
이제 교육이 완료되면 모델 아티팩트가 `output_path`에 저장됩니다.
```
# S3 URI of the trained model artifact (model.tar.gz) under output_path.
tf_mnist_model_data = est.model_data
print("Model artifact saved at:\n", tf_mnist_model_data)
```
이제 현재 노트북 커널에 변수 `model_data`를 저장합니다. 다음 노트북에서 모델 아티팩트를 검색하고 SageMaker 엔드 포인트에 배포하는 방법을 배우게됩니다.
```
%store tf_mnist_model_data
```
## 학습컨테이너에서 실행하기 전에 스크립트를 테스트하고 디버깅하기
앞서 사용한 `train.py`는 테스트가 완료된 코드이며, 바로 학습 컨테이너에서 실행할 수 있습니다. 하지만 해당 스크립트를 개발할 때에는, SageMaker로 보내기 전에 로컬 환경에서 컨테이너 환경을 시뮬레이션하고 테스트해야할 수 있습니다. 컨테이너 환경에서 테스트와 디버깅을 하는 것이 번거롭다면 다음과 같은 코드를 참조하여 활용할 수 있습니다.
```
!pygmentize code/test_train.py
```
In [the next notebook](get_started_mnist_deploy.ipynb) you will see how to deploy your
trained model artifacts to a SageMaker endpoint.
| github_jupyter |
# VacationPy
----
#### Note
* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
# Load the Part I output (per-city weather observations) for mapping.
weather_data = pd.read_csv("../output_data/weather_data.csv")
weather_data
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
# Configure gmaps
gmaps.configure(api_key = g_key)
# Store Humidity as weight, find maximum Humidity
humidity = weather_data["Humidity (%)"]
humidity_max = float(humidity.max())
# Store Latitude and Longitude as locations
locations = weather_data[["Latitude", "Longitude"]]
# Plot Heatmap
fig = gmaps.figure()
# Create Heat layer
# dissipating=False keeps intensity fixed as the user zooms out.
heat_layer = gmaps.heatmap_layer(locations, weights = humidity, dissipating = False, max_intensity = humidity_max, point_radius = 3)
# Add Heat layer and display
fig.add_layer(heat_layer)
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows will null values.
```
# Find cities with max temperature between 21°C and 27°C, wind speed less than 5 m/s and 0 cloudiness
narrowed_weather_data = weather_data.loc[(weather_data["Max Temperature (°C)"] > 21) & (weather_data["Max Temperature (°C)"] < 27) & (weather_data["Cloudiness (%)"] == 0) & (weather_data["Wind Speed (m/s)"] < 5), :]
# Drop any rows with null values
narrowed_weather_data = narrowed_weather_data.dropna(how='any')
# Re-index from 0 and discard the old index column.
narrowed_weather_data.reset_index(inplace=True)
del narrowed_weather_data['index']
# Display narrowed down cities
narrowed_weather_data
```
### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels with 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
# Store into hotel_df variable.
# BUG FIX: work on a copy -- the plain assignment aliased
# narrowed_weather_data, so adding columns mutated the source frame (and
# triggered pandas SettingWithCopy warnings on the filtered view).
hotel_df = narrowed_weather_data.copy()
# Add Hotel Name column
hotel_df["Hotel Name"] = " "
# Display new DataFrame
hotel_df
# Empty list to hold hotel names
hotel_ls = []
# Construct URL
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Search for hotels within 5 km of each city's coordinates.
params = {"type": "hotel",
          "keyword": "hotel",
          "radius": 5000,
          "key" : g_key}
# Iterate through cities in hotel_df
for index, row in hotel_df.iterrows():
    # Find latitude and longitude for each city in DataFrame
    lat = row["Latitude"]
    lng = row["Longitude"]
    # Add location to parameters
    params["location"] = f"{lat},{lng}"
    # Construct URL and make API request
    response = requests.get(base_url, params = params).json()
    # A missing 'results' key or an empty result list both mean "no hotel".
    # BUG FIX: catch only the expected exceptions instead of a bare except,
    # which also swallowed network errors and KeyboardInterrupt.
    try:
        hotel_ls.append(response["results"][0]["name"])
    except (KeyError, IndexError):
        hotel_ls.append("NaN")
hotel_df["Hotel Name"] = hotel_ls
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
# Assumes hotel_df carries 'City' and 'Country' columns from Part I -- confirm.
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Latitude", "Longitude"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
fig.add_layer(markers)
# Display figure
fig
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb)
# 21. Gender Classifier
**Gender Classifier** detects the gender of the patient in the clinical document.
It can classify the documents into `Female`, `Male` and `Unknown`.
-'**Classifierdl_gender_sbert**' (works with licensed `sbiobert_base_cased_mli`)
It has been trained on more than four thousands clinical documents (radiology reports, pathology reports, clinical visits etc.) which were annotated internally.
## Colab Setup
```
import json, os
from google.colab import files
# Prompt for the license file only if it is not already present.
if 'spark_jsl.json' not in os.listdir():
license_keys = files.upload()
os.rename(list(license_keys.keys())[0], 'spark_jsl.json')
with open('spark_jsl.json') as f:
license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
# Export the keys (e.g. SECRET, JSL_VERSION, PUBLIC_VERSION) so the
# shell `pip install` lines below can expand them as $VARS.
os.environ.update(license_keys)
# Installing pyspark and spark-nlp
! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
import json
import os
import sparknlp_jsl
import sparknlp
from pyspark.ml import Pipeline,PipelineModel
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
# Driver/serializer sizing for the licensed Spark NLP session.
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print("Spark NLP Version :", sparknlp.version())
print("Spark NLP_JSL Version :", sparknlp_jsl.version())
spark
# if you want to start the session with custom params as in start function above
# Alternative manual SparkSession builder mirroring sparknlp_jsl.start().
# NOTE(review): references globals `version` and `jsl_version` that are not
# defined anywhere in this notebook — confirm their values before enabling
# the commented-out call below.
def start(secret):
builder = SparkSession.builder \
.appName("Spark NLP Licensed") \
.master("local[*]") \
.config("spark.driver.memory", "16G") \
.config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
.config("spark.kryoserializer.buffer.max", "2000M") \
.config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.11:"+version) \
.config("spark.jars", "https://pypi.johnsnowlabs.com/"+secret+"/spark-nlp-jsl-"+jsl_version+".jar")
return builder.getOrCreate()
#spark = start(secret)
```
# Gender Classifier Pipeline with **sbert**
```
# Stage 1: wrap the raw clinical text in a Spark NLP document annotation.
document = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
# Stage 2: licensed sentence-level BioBERT embeddings (truncated at 512 tokens).
sbert_embedder = BertSentenceEmbeddings().pretrained("sbiobert_base_cased_mli", 'en', 'clinical/models')\
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")\
.setMaxSentenceLength(512)
# Stage 3: pretrained gender classifier on top of the sbert embeddings.
gender_classifier = ClassifierDLModel.pretrained( 'classifierdl_gender_sbert', 'en', 'clinical/models') \
.setInputCols(["document", "sentence_embeddings"]) \
.setOutputCol("class")
gender_pred_pipeline_sbert = Pipeline(stages = [
document,
sbert_embedder,
gender_classifier
])
# Fitting on an empty frame just materialises the pretrained stages.
empty_data = spark.createDataFrame([[""]]).toDF("text")
model_sbert = gender_pred_pipeline_sbert.fit(empty_data)
text ="""social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.family history: shows a family history of breast cancer."""
# LightPipeline runs the fitted pipeline on plain Python strings (no Spark job).
gender_pipeline_sbert = LightPipeline(model_sbert)
result = gender_pipeline_sbert.annotate(text)
# Predicted label: 'Female', 'Male' or 'Unknown'.
result['class'][0]
```
### Sample Clinical Notes
```
# Sample 1: family history of breast cancer (no explicit gendered pronouns).
text1 = '''social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.
family history: shows a family history of breast cancer.'''
result = gender_pipeline_sbert.annotate(text1)
result['class'][0]
# Sample 2: cardiology preoperative workup.
text2 = '''The patient is a 48- year-old, with severe mitral stenosis diagnosed by echocardiography, moderate
aortic insufficiency and moderate to severe pulmonary hypertension who is being evaluated as a part of a preoperative
workup for mitral and possible aortic valve repair or replacement.'''
result = gender_pipeline_sbert.annotate(text2)
result['class'][0]
# Sample 3: breast-cancer note with pronouns masked as XX/xx.
text3 = '''HISTORY: The patient is a 57-year-old XX, who I initially saw in the office on 12/27/07, as a referral from the Tomball Breast Center.
On 12/21/07, the patient underwent image-guided needle core biopsy of a 1.5 cm lesion at the 7 o'clock position of the left breast (inferomedial).
The biopsy returned showing infiltrating ductal carcinoma high histologic grade.
The patient stated that xx had recently felt and her physician had felt a palpable mass in that area prior to her breast imaging.'''
result = gender_pipeline_sbert.annotate(text3)
result['class'][0]
# Sample 4: bariatric note, pronouns masked.
text4 = '''The patient states that xx has been overweight for approximately 35 years and has tried multiple weight loss modalities in
the past including Weight Watchers, NutriSystem, Jenny Craig, TOPS, cabbage diet, grape fruit diet, Slim-Fast, Richard Simmons,
as well as over-the-counter measures without any long-term sustainable weight loss.
At the time of presentation to the practice, xx is 5 feet 6 inches tall with a weight of 285.4 pounds and a body mass index of 46.
xx has obesity-related comorbidities, which includes hypertension and hypercholesterolemia.'''
result = gender_pipeline_sbert.annotate(text4)
result['class'][0]
# Sample 5: prostate pathology (anatomy implies gender without pronouns).
text5 = '''Prostate gland showing moderately differentiated infiltrating adenocarcinoma,
Gleason 3 + 2 extending to the apex involving both lobes of the prostate, mainly right.'''
result = gender_pipeline_sbert.annotate(text5)
result['class'][0]
# Sample 6: explicit "he" pronoun.
text6 = '''SKIN: The patient has significant subcutaneous emphysema of the upper chest and
anterior neck area although he states that the subcutaneous emphysema has improved significantly since yesterday.'''
result = gender_pipeline_sbert.annotate(text6)
result['class'][0]
# Sample 7: surgical note, pronouns masked.
text7 = '''INDICATION: The patient is a 42-year-old XX who is five days out from transanal excision of a benign anterior base lesion.
xx presents today with diarrhea and bleeding. Digital exam reveals bright red blood on the finger.
xx is for exam under anesthesia and control of hemorrhage at this time.
'''
result = gender_pipeline_sbert.annotate(text7)
result['class'][0]
# Sample 8: complex history with no gender cues (expected 'Unknown').
text8 = '''INDICATION: ___ year old patient with complicated medical history of paraplegia
and chronic indwelling foley, recurrent MDR UTIs, hx Gallbladder fossa
abscess,type 2 DM, HTN, CAD, DVT s/p left AKA complicated complicated by
respiratory failure requiring tracheostomy and PEG placement, right ischium
osteomyelitis due to chronic pressure ulcers with acute shortness of breath...'''
result = gender_pipeline_sbert.annotate(text8)
result['class'][0]
```
| github_jupyter |
# Chapter 7: n-step Bootstrapping
## 1. n-step TD Prediction
- Generalize one-step TD(0) method
- Temporal difference extends over n-steps

- Want to update estimated value $v_\pi(S_t)$ of state $S_t$ from:
$$S_t,R_{t+1},S_{t+1},R_{t+2},S_{t+2},...,R_T,S_T$$
- for *MC*, target is complete return
$$G_t = R_{t+1}+\gamma R_{t+2}+\gamma^2R_{t+3}+...+\gamma^{T-t-1}R_T$$
- for *TD*, one-step method
$$G_{t:t+1} = R_{t+1}+\gamma V_t(S_{t+1})$$
- for *two-step TD*, two-step method
$$G_{t:t+2} = R_{t+1}+\gamma R_{t+2}+\gamma^2V_{t+1}(S_{t+2})$$
- for *n-step TD*, n-step method with $n\ge 1, 0\le t<T-n$
$$
\begin{cases}
G_{t:t+n} &= R_{t+1}+\gamma R_{t+2}+...+\gamma^{n-1}R_{t+n}+\gamma^nV_{t+n-1}(S_{t+n})
\\G_{t:t+n} &= G_t ~~~,\text{if } t+n\ge T
\end{cases}
$$
- Wait for $R_{t+n}, V_{t+n-1}$, until time $t+n$, then update estimate values:
$$V_{t+n}(S_t) = V_{t+n-1}(S_t)+\alpha\big[G_{t:t+n}-V_{t+n-1}(S_t)\big] ~~~, 0\le t<T$$
- all other states remain unchanged: $V_{t+n}(s)=V_{t+n-1}(s), \forall s\neq S_t$

- **Error Reduction Property** of n-step returns:
$$\max_s\big| E_\pi[G_{t:t+n} | S_t=s]-v_\pi(s)\big| \le \gamma^n\max_s\big| V_{t+n-1}(s)-v_\pi(s)\big|, \forall n\ge 1$$
- Can show formally that n-step TD methods converge to the correct predictions
## 2. n-step Sarsa
- Switch states for actions (state-action pairs) and then use an ε-greedy policy

- n-step returns for action-value:
$$G_{t:t+n}=R_{t+1}+\gamma R_{t+2}+...+\gamma^{n-1}R_{t+n}+\gamma^nQ_{t+n-1}(S_{t+n},A_{t+n})~~~, n\ge 1, 0\le t<T-n$$
with $G_{t:t+n}=G_t \text{ if }t+n\ge T$
- **n-step Sarsa**:
$$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\alpha\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)\big]~~~,0\le t<T$$
- **n-step Expected Sarsa**:
$$G_{t:t+n}=R_{t+1}+\gamma R_{t+2}+...+\gamma^{n-1}R_{t+n}+\gamma^n\overline V_{t+n-1}(S_{t+n},A_{t+n})~~~, t+n<T$$
- where, *expected approximate value* of state $s$:
$$\overline V_t(s)=\sum_a\pi(a | s)Q_t(s,a) ~~~, \forall s\in\mathcal S$$
- if $s$ is terminal, then $\overline V(s)=0$

## 3. n-step Off-policy Learning
- Use relative probability of just n actions:
$$\rho_{t:h}=\prod_{k=t}^{\min(h,T-1)}\frac{\pi(A_k | S_k)}{b(A_k | S_k)}$$
- n-step TD:
$$V_{t+n}(S_t)=V_{t+n-1}(S_t)+\alpha\color{blue}{\rho_{t:t+n-1}}\big[G_{t:t+n}-V_{t+n-1}(S_t)\big]~~~,0\le t<T$$
- n-step Sarsa:
$$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\alpha\color{blue}{\rho_{t+1:t+n}}\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)\big]~~~,0\le t<T$$
- n-step Expected Sarsa:
$$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\alpha\color{blue}{\rho_{t+1:t+n-1}}\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)\big]~~~,0\le t<T$$

## 4. Per-decision Methods with Control Variates
- add *control variate* to **off-policy** of n-step return to reduce variance
$$G_{t:h}=\rho_t(R_{t+1}+\gamma G_{t+1:h})+(1-\rho_t)V_{h-1}(S_t) ~~~,t<h<T$$
where, $G_{h:h}=V_{h-1}(S_h)$
- if $\rho_t=0$, then the target does not change
- Includes on-policy when $\rho_t=1$
- for action values, the first action does not play a role in the importance sampling
$$
\begin{aligned}
G_{t:h} &= R_{t+1}+\gamma\big(\rho_{t+1}G_{t+1:h}+\overline V_{h-1}(S_{t+1})-\rho_{t+1}Q_{h-1}(S_{t+1},A_{t+1})\big)
\\ &= R_{t+1}+\gamma\rho_{t+1}\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\big)+\gamma\overline V_{h-1}(S_{t+1})
\end{aligned}
$$
where, $t<h\le T$, if $h<T$, then $G_{h:h}=Q_{h-1}(S_h,A_h)$, else $G_{T-1:h}=R_T$
## 5. Off-policy Learning Without Importance Sampling: The n-step Tree Backup Algorithm
- Use **left nodes** to estimate action-values

- one-step return is the same as Expected Sarsa for $t<T-1$:
$$G_{t:t+1}=R_{t+1}+\gamma\sum_a\pi(a | S_{t+1})Q_t(S_{t+1},a)$$
- two-step tree-backup for $t<T-2$:
$$
\begin{aligned}
G_{t:t+2} &= R_{t+1}+\gamma\sum_{a\neq A_{t+1}}\pi(a | S_{t+1})Q_{t+1}(S_{t+1},a)
\\ & ~~~ +\gamma\pi(A_{t+1} | S_{t+1})\big(R_{t+2}+\gamma\sum_{a}\pi(a | S_{t+2})Q_{t+1}(S_{t+2},a)\big)
\\ &= R_{t+1}+\gamma\sum_{a\neq A_{t+1}}\pi(a | S_{t+1})Q_{t+1}(S_{t+1},a)+\gamma\pi(A_{t+1} | S_{t+1})G_{t+1:t+2}
\end{aligned}
$$
- n-step tree-backup for $t<T-1,n\ge 2$:
$$G_{t:t+n} = R_{t+1}+\gamma\sum_{a\neq A_{t+1}}\pi(a | S_{t+1})Q_{t+n-1}(S_{t+1},a)+\gamma\pi(A_{t+1} | S_{t+1})G_{t+1:t+n}$$
- action-value update rule as usual from n-step Sarsa:
$$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\alpha[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)]$$
for, $0\le t < T$

## 6. A Unifying Algorithm: n-step Q(σ)

- $\sigma_t\in[0,1]$ denote the degree of sampling on step $t$
- $\sigma=1$ for full sampling
- $\sigma=0$ for pure expectation
- Rewrite the n-step back-up tree as:
$$
\begin{aligned}
G_{t:h} &= R_{t+1}+\gamma\sum_{a\neq A_{t+1}}\pi(a | S_{t+1})Q_{h-1}(S_{t+1},a)+\gamma\pi(A_{t+1} | S_{t+1})G_{t+1:h}
\\ &= R_{t+1}+\gamma\overline V_{h-1}(S_{t+1})-\gamma\pi(A_{t+1} | S_{t+1})Q_{h-1}(S_{t+1},A_{t+1})+\gamma\pi(A_{t+1} | S_{t+1})G_{t+1:h}
\\ &= R_{t+1}+\gamma\pi(A_{t+1} | S_{t+1})\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\big)+\gamma\overline V_{h-1}(S_{t+1})
\end{aligned}
$$
- n-step $Q(\sigma)$:
$$G_{t:h}=R_{t+1}+\gamma\big(\sigma_{t+1}\rho_{t+1}+(1-\sigma_{t+1})\pi(A_{t+1} | S_{t+1})\big)\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\big)+\gamma\overline V_{h-1}(S_{t+1})$$
where, $t<h\le T$
- if $h<T$, then $G_{h:h}=Q_{h-1}(S_h,A_h)$
- if $h=T$, then $G_{T-1:T}=R_T$

| github_jupyter |
This script takes the notebook with RNA and DNA BSID's and collects information for the corresponding samples from fusion summary files, breakpoint density files, GISTIC CNA broad_values file and FPKM files
```
import argparse
import pandas as pd
import numpy as np
import zipfile
import statistics
import scipy
from scipy import stats
# Reading all the input files
# NOTE(review): `zip` shadows the builtin zip() for the rest of the script.
zip=zipfile.ZipFile("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/data/pbta-cnv-cnvkit-gistic.zip")
# GISTIC per-chromosome-arm broad values, indexed by arm (e.g. "1q").
CNA=pd.read_csv(zip.open("2019-12-10-gistic-results-cnvkit/broad_values_by_arm.txt"), sep="\t")
CNA = CNA.set_index("Chromosome Arm")
# GSVA hallmark scores; keep only the TNFA-via-NFKB pathway rows.
gsva = pd.read_csv("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/gene-set-enrichment-analysis/results/gsva_scores_stranded.tsv", sep="\t")
gsva_NFKB = gsva.loc[gsva['hallmark_name'] == "HALLMARK_TNFA_SIGNALING_VIA_NFKB"]
gsva_NFKB = gsva_NFKB.set_index("Kids_First_Biospecimen_ID")
# Collapsed FPKM matrix: genes (rows) x RNA biospecimen IDs (columns).
fpkm_df = pd.read_csv("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/molecular-subtyping-EPN/epn-subset/epn-pbta-gene-expression-rsem-fpkm-collapsed.stranded.tsv.gz", sep = "\t")
fpkm_df = fpkm_df.set_index("GENE")
# NOTE(review): zscore_fpkm_df is never used below; per-gene z-scores are
# recomputed by fill_df_with_fpkm_zscores instead — confirm it can be dropped.
zscore_fpkm_df = fpkm_df.apply(scipy.stats.zscore)
fusion = pd.read_csv("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/fusion-summary/results/fusion_summary_ependymoma_foi.tsv", sep="\t")
fusion = fusion.set_index("Kids_First_Biospecimen_ID")
# Chromosomal-instability breakpoint densities, indexed by DNA biospecimen ID.
breakpoint_density = pd.read_csv("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/chromosomal-instability/breakpoint-data/union_of_breaks_densities.tsv", sep="\t")
breakpoint_density = breakpoint_density.set_index("samples")
# Target notebook: one row per sample with RNA/DNA biospecimen ID columns.
EPN_notebook = pd.read_csv("/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/molecular-subtyping-EPN/results/EPN_molecular_subtype.tsv", sep="\t")
# Map a GISTIC broad_values entry to a 0/1 indicator for the requested
# direction of copy-number change.
def DNA_samples_fill_df(CNA_value, loss_gain):
    """Return 1 when CNA_value shows the requested change, else 0.

    A negative broad value counts as a "loss" and a positive one as a
    "gain"; a value of exactly zero matches neither direction.
    """
    lost = loss_gain == "loss" and CNA_value < 0
    gained = loss_gain == "gain" and CNA_value > 0
    return 1 if (lost or gained) else 0
# Append a column of per-sample expression z-scores for one gene.
def fill_df_with_fpkm_zscores(df,fpkmdf, column_name, gene_name):
    """Add `column_name` to `df` holding z-scored FPKM values of `gene_name`.

    Expression values are looked up in `fpkmdf` (genes x RNA biospecimen IDs)
    via each row's "Kids_First_Biospecimen_ID_RNA", z-scored across the rows
    of `df`, and written back; the mutated frame is also returned.
    """
    expression = np.array([fpkmdf.loc[gene_name, bsid]
                           for bsid in df["Kids_First_Biospecimen_ID_RNA"]])
    df[column_name] = pd.Series(scipy.stats.zscore(expression))
    return df
# Input notebook before adding columns
EPN_notebook.head()
# Input. CNA file
CNA.head()
#Adding columns to EPN_notebook based on values from CNA file (boolean value)
# Matching based on DNA BSID (row names in CNA file and column names in EPN_notebook) -> Look at row 4 below
# NOTE(review): the `is not np.nan` identity check only catches the np.nan
# singleton; confirm missing DNA IDs in this frame are exactly np.nan.
EPN_notebook["1q_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["1q", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook.head()
#. Similar to the above, adding more columns to EPN_notebook
EPN_notebook["9p_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["9p", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook["9q_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["9q", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook["6p_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["6p", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook["6q_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["6q", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook["11q_loss"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["11q", x["Kids_First_Biospecimen_ID_DNA"]], "loss")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook["11q_gain"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc["11q", x["Kids_First_Biospecimen_ID_DNA"]], "gain")
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else 0,axis=1)
EPN_notebook.head(4)
gsva_NFKB.head(3)
# NOTE(review): despite the NFKB mentions in the original comments, this
# cell adds the chromosomal-instability breakpoint density per DNA
# biospecimen; the NFKB GSVA frame loaded above is displayed but never
# joined in this chunk — confirm intent.
# If DNA sample BSID not found, then fill with "NA"
EPN_notebook["breaks_density-chromosomal_instability"] = EPN_notebook.apply(lambda x: breakpoint_density.loc[x["Kids_First_Biospecimen_ID_DNA"], "breaks_density"]
if x["Kids_First_Biospecimen_ID_DNA"] is not np.nan else "NA", axis=1)
EPN_notebook.head(3)
# Printing. FPKM dataframe
fpkm_df.head(2)
# Adding FPKM for different genes to EPN_notebook using function fill_df_with_fpkm_zscores
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "RELA_expr_Z-scores", "RELA")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "L1CAM_expr_Zscore", "L1CAM")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "ARL4D_expr_Zscore", "ARL4D")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "CLDN1_expr_zscore", "CLDN1")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "CXorf67_expr_zscore", "CXorf67")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "TKTL1_expr_zscore", "TKTL1")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "GPBP1_expr_zscore", "GPBP1")
EPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, "IFT46_expr_zscore", "IFT46")
EPN_notebook.head(4)
# Finally print out the dataframe to an output file
# NOTE(review): no write call follows in this chunk — confirm the export
# cell exists elsewhere.
```
| github_jupyter |
```
# importing libraries
import h5py
import scipy.io as io
import PIL.Image as Image
import numpy as np
import os
import glob
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from scipy import spatial
import json
from matplotlib import cm as CM
from image import *
from model import CSRNet
import torch
from tqdm import tqdm
%matplotlib inline
# function to create density maps for images
def gaussian_filter_density(gt):
    """Convert a binary head-annotation map into a crowd density map.

    Each nonzero pixel of `gt` is spread into a 2-D Gaussian whose sigma is
    geometry-adaptive: 0.1 x the summed distance to the 3 nearest other
    annotations (so dense regions get narrow kernels). The returned float32
    array has the same shape as `gt` and integrates (approximately) to the
    number of annotated points.
    """
    print (gt.shape)
    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = np.count_nonzero(gt)
    if gt_count == 0:
        return density
    # Annotation coordinates as (x, y) pairs (column, row).
    pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0])))
    leafsize = 2048
    # build kdtree
    tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)
    # query kdtree: distance 0 to self plus the 3 nearest neighbours
    distances, locations = tree.query(pts, k=4)
    print ('generate density...')
    for i, pt in enumerate(pts):
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            sigma = (distances[i][1] + distances[i][2] + distances[i][3]) * 0.1
        else:
            sigma = np.average(np.array(gt.shape)) / 2. / 2.  # case: 1 point
        # Fix: the scipy.ndimage.filters namespace is deprecated and removed
        # in recent SciPy; call scipy.ndimage.gaussian_filter directly.
        density += scipy.ndimage.gaussian_filter(pt2d, sigma, mode='constant')
    print ('done.')
    return density
#setting the root to the Shanghai dataset you have downloaded
# change the root path as per your location of dataset
root = '../ShanghaiTech/'
part_A_train = os.path.join(root,'part_A/train_data','images')
part_A_test = os.path.join(root,'part_A/test_data','images')
part_B_train = os.path.join(root,'part_B/train_data','images')
part_B_test = os.path.join(root,'part_B/test_data','images')
path_sets = [part_A_train,part_A_test]
# Collect every .jpg under the part_A train/test image folders.
img_paths = []
for path in path_sets:
for img_path in glob.glob(os.path.join(path, '*.jpg')):
img_paths.append(img_path)
# For each image: load the .mat head annotations, rasterise them into a
# binary map, blur into a density map, and save next to the ground truth
# as an .h5 file with a single 'density' dataset.
for img_path in img_paths:
print (img_path)
mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))
img= plt.imread(img_path)
k = np.zeros((img.shape[0],img.shape[1]))
gt = mat["image_info"][0,0][0,0][0]
for i in range(0,len(gt)):
if int(gt[i][1])<img.shape[0] and int(gt[i][0])<img.shape[1]:
k[int(gt[i][1]),int(gt[i][0])]=1
k = gaussian_filter_density(k)
with h5py.File(img_path.replace('.jpg','.h5').replace('images','ground-truth'), 'w') as hf:
hf['density'] = k
# Sanity check: show the first image, its density map, and the head count
# (sum of the density map).
plt.imshow(Image.open(img_paths[0]))
gt_file = h5py.File(img_paths[0].replace('.jpg','.h5').replace('images','ground-truth'),'r')
groundtruth = np.asarray(gt_file['density'])
plt.imshow(groundtruth,cmap=CM.jet)
np.sum(groundtruth)
# Same pipeline as the part_A cell above, applied to part_B.
path_sets = [part_B_train,part_B_test]
img_paths = []
for path in path_sets:
for img_path in glob.glob(os.path.join(path, '*.jpg')):
img_paths.append(img_path)
# creating density map for part_b images
for img_path in img_paths:
print (img_path)
mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))
img= plt.imread(img_path)
k = np.zeros((img.shape[0],img.shape[1]))
gt = mat["image_info"][0,0][0,0][0]
for i in range(0,len(gt)):
if int(gt[i][1])<img.shape[0] and int(gt[i][0])<img.shape[1]:
k[int(gt[i][1]),int(gt[i][0])]=1
k = gaussian_filter_density(k)
with h5py.File(img_path.replace('.jpg','.h5').replace('images','ground-truth'), 'w') as hf:
hf['density'] = k
```
| github_jupyter |
```
import pandas as pd
import numpy as np
```
##### Cargar la data de salarios
```
data = pd.read_csv('../Datasets casos de estudio 2/Case study 1/cs2.1.csv')
```
##### Variables en dataset
```
data.head()
data.dtypes
```
##### Dimensiones del dataset
```
data.shape
```
##### Estadisticos principales
```
data.describe()
```
##### Linear Regression para el salario
```
data = data.rename(columns={'wage': 'salario'})
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
lr = LinearRegression()
# First 11 columns are the regressors; column 11 is the target (salario).
X = data.iloc[:,:11]
Y = data.iloc[:,11]
X.head()
Y.head()
```
##### Run Linear Specification and compute MSE and R^2
```
# Fit the basic linear specification on the full sample.
full_fit = lr.fit(X, Y)
y_predict = lr.predict(X)
# p = number of regressors in this specification (second shape dimension).
p_fmla1 = X.shape
p_fmla1 = p_fmla1[1]
p_fmla1
# Helper: adjusted R^2 from a fitted sklearn linear model.
def adj_r2_score(model,y,yhat):
    """Return the adjusted R^2 of predictions `yhat` against targets `y`.

    Penalises the plain R^2 by the number of fitted coefficients
    (len(model.coef_)) relative to the sample size len(y).
    """
    n_obs = len(y)
    n_coef = len(model.coef_)
    plain_r2 = metrics.r2_score(y, yhat)
    return 1 - (n_obs - 1) / (n_obs - n_coef - 1) * (1 - plain_r2)
```
##### R2 y R2 ajustado
```
# R2 of the basic specification (in-sample).
r2_fmla1 = lr.score(X,Y)
r2_fmla1
# R2_adjusted
r2_adj_fmla1 = adj_r2_score(lr,Y,y_predict)
r2_adj_fmla1
```
##### MSE
```
# MSE of the basic specification (in-sample).
mse_fmla1 = mean_squared_error(Y, y_predict)
mse_fmla1
```
### Creacion de nuevas variables cuadraticas
```
from sklearn.preprocessing import PolynomialFeatures
# Degree-2 interaction-only expansion (no squared terms).
pf = PolynomialFeatures(2, interaction_only=True)
# Drop two region dummies before building interactions.
X = X.drop(['mw','ne'], axis = 1)
X.head()
# Interact every regressor except the first column (female).
interac_var = X.iloc[:,1:]
interac_var.head()
female = np.array(X.iloc[:,0])
female
new_features = pf.fit_transform(interac_var)
new_features
print('female: {}'.format(female.shape))
print('new_features: {}'.format(new_features.shape))
# Prepend the untouched female dummy to the expanded design matrix.
X_poly = np.append(female.reshape(-1,1),new_features, axis = 1)
X_poly.shape
# Smaller variant: interactions of only the first two non-female regressors.
interac_var2 = X.iloc[:,1:3]
interac_var2.head()
new_features2 = pf.fit_transform(interac_var2)
new_features2
X_poly_2var = np.append(female.reshape(-1,1),new_features2, axis = 1)
X_poly_2var.shape
```
##### Run Quadratic specification and compute MSE an R^2
```
# Fit the flexible (interaction) specification on the full sample.
fmla2 = lr.fit(X_poly, Y)
p_fmla2 = X_poly.shape
p_fmla2 = p_fmla2[1]
y_predict_fmla2 = lr.predict(X_poly)
# R2
r2_fmla2 = lr.score(X_poly,Y)
# R2_adjusted
r2_adj_fmla2 = adj_r2_score(lr,Y,y_predict_fmla2)
# MSE
mse_fmla2 = mean_squared_error(Y,y_predict_fmla2)
p_fmla2
```
##### Resumen de los calculos
```
# Side-by-side comparison of the two full-sample specifications.
table = np.ndarray((2,4))
table[0,] = (p_fmla1, r2_fmla1, r2_adj_fmla1, mse_fmla1)
table[1,] = (p_fmla2, r2_fmla2, r2_adj_fmla2, mse_fmla2)
table = pd.DataFrame(table, columns=['p','R2','R2_adj','MSE'], index=['Basic reg','Flexible reg'])
table
##### Linear and Quadratic specifications with Sample Splitting
from sklearn.model_selection import train_test_split
```
##### Basic reg & Flexbile split
```
# Same 75/25 split (random_state=42) for both designs so the rows align.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.25, random_state = 42, shuffle = True)
x_train_poly, x_test_poly, y_train_poly, y_test_poly = train_test_split(X_poly, Y, test_size = 0.25, random_state = 42, shuffle = True)
```
##### Basic
```
# Basic specification: fit on the training split, score on both splits.
fmla1 = lr.fit(x_train, y_train)
p_fmla1 = x_train.shape
p_fmla1 = p_fmla1[1]
y_pred_test = lr.predict(x_test)
y_pred_train = lr.predict(x_train)
print('Calculo de scores para training')
# R2
r2_fmla1_train = lr.score(x_train,y_train)
print('R^2:{}'.format(r2_fmla1_train))
# R2_adjusted
r2_adj_fmla1_train = adj_r2_score(lr, y_train, y_pred_train)
print('R^2_adj:{}'.format(r2_adj_fmla1_train))
# MSE
mse_fmla1_train = mean_squared_error(y_train, y_pred_train)
print('MSE:{}'.format(mse_fmla1_train))
print('Calculo de scores para test')
# R2
r2_fmla1_test = lr.score(x_test,y_test)
print('R^2:{}'.format(r2_fmla1_test))
# R2_adjusted
r2_adj_fmla1_test = adj_r2_score(lr, y_test, y_pred_test)
print('R^2_adj:{}'.format(r2_adj_fmla1_test))
# MSE
mse_fmla1_test = mean_squared_error(y_test, y_pred_test)
print('MSE:{}'.format(mse_fmla1_test))
```
##### Flexible
```
# Flexible (interaction) specification: fit on the training split,
# score on both splits.
fmla1 = lr.fit(x_train_poly, y_train_poly)
p_fmla1_poly = x_train_poly.shape
p_fmla1_poly = p_fmla1_poly[1]
y_pred_test_poly = lr.predict(x_test_poly)
y_pred_train_poly = lr.predict(x_train_poly)
print('Calculo de scores para training')
# R2
r2_fmla1_train_poly = lr.score(x_train_poly,y_train_poly)
print('R^2:{}'.format(r2_fmla1_train_poly))
# R2_adjusted
# Fix: use this model's own training predictions (y_pred_train_poly);
# the original passed y_pred_train from the basic specification.
r2_adj_fmla1_train_poly = adj_r2_score(lr, y_train_poly, y_pred_train_poly)
print('R^2_adj:{}'.format(r2_adj_fmla1_train_poly))
# MSE
mse_fmla1_train_poly = mean_squared_error(y_train_poly, y_pred_train_poly)
print('MSE:{}'.format(mse_fmla1_train_poly))
print('Calculo de scores para test')
# R2
r2_fmla1_test_poly = lr.score(x_test_poly,y_test_poly)
print('R^2:{}'.format(r2_fmla1_test_poly))
# R2_adjusted
r2_adj_fmla1_test_poly = adj_r2_score(lr, y_test_poly, y_pred_test_poly)
print('R^2_adj:{}'.format(r2_adj_fmla1_test_poly))
# MSE
mse_fmla1_test_poly = mean_squared_error(y_test_poly, y_pred_test_poly)
print('MSE:{}'.format(mse_fmla1_test_poly))
# Train/test comparison of both specifications (rows: spec x split).
table = np.ndarray((4,4))
table[0,] = (p_fmla1, r2_fmla1_train, r2_adj_fmla1_train, mse_fmla1_train)
table[1,] = (p_fmla1, r2_fmla1_test, r2_adj_fmla1_test, mse_fmla1_test)
table[2,] = (p_fmla1_poly, r2_fmla1_train_poly, r2_adj_fmla1_train_poly, mse_fmla1_train_poly)
table[3,] = (p_fmla1_poly, r2_fmla1_test_poly, r2_adj_fmla1_test_poly, mse_fmla1_test_poly)
table = pd.DataFrame(table, columns=['p','R2','R2_adj','MSE'], index=['Basic reg train','Basic reg test',
'Flexible reg train','Flexible reg test'])
table
```
| github_jupyter |
<a href="https://colab.research.google.com/github/arjunparmar/VIRTUON/blob/main/Harshit/SwapNet_Experimentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
## Imports
import os
import sys
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
from glob import glob
import tensorflow
from tensorflow import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers import concatenate, Concatenate
## Seeding
seed = 2019
# Fix: call the seeding functions. The original assigned to them
# (`random.seed = seed` etc.), which overwrites the functions with an int
# and never seeds any RNG; `tensorflow.seed = seed` was a no-op attribute.
random.seed(seed)
np.random.seed(seed)
tensorflow.random.set_seed(seed)
def load_image(img_path, show=False):
    """Read an image from disk, resize it to 128x128, and return it as an
    array of shape (height, width, channels).

    The `show` flag is accepted for API compatibility but unused. No batch
    dimension is added here — callers expand dims themselves before feeding
    the model.
    """
    resized = cv2.resize(cv2.imread(img_path), (128, 128))
    return image.img_to_array(resized)
!mkdir seg_train
!cp -r /content/drive/Shareddrives/Virtuon/Clothing\ Coparsing/dataset/seg_train/* /content/seg_train/
!mkdir seg_test
!cp -r /content/drive/Shareddrives/Virtuon/Clothing\ Coparsing/dataset/seg_test/* /content/seg_test/
!mkdir pos_train
!cp -r /content/drive/Shareddrives/Virtuon/Clothing\ Coparsing/dataset/pose_train/* /content/pos_train/
!mkdir pos_test
!cp -r /content/drive/Shareddrives/Virtuon/Clothing\ Coparsing/dataset/pose_test/* /content/pos_test/
x = []
y = []
def get_image(path):
    """Recursively load every file under `path` via load_image.

    Returns a list of (128, 128, 3) image arrays, in os.walk order.
    """
    images = []
    for subdir, _dirs, files in os.walk(path):
        for fname in files:
            images.append(load_image(os.path.join(subdir, fname)))
    return images
# Build the two input branches and the target as stacked image arrays.
x_1 = get_image(r'/content/pos_train') #BS
x_2 = get_image(r'/content/seg_train') #CS
# NOTE(review): x_2 and y both load /content/seg_train, so the second input
# equals the target — confirm this is the intended training setup.
y = get_image(r'/content/seg_train')
x_1 = np.asarray(x_1)
x_2 = np.asarray(x_2)
y = np.asarray(y)
print(x_1.shape)
print(x_2.shape)
print(y.shape)
def down_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    """Encoder stage: four stacked Conv2D+ReLU layers, then 2x2 max-pooling.

    Returns (pre-pool feature map, pooled feature map); the pre-pool map is
    kept so the decoder can use it as a skip connection.
    """
    feat = x
    for _ in range(4):
        feat = keras.layers.Conv2D(filters, kernel_size, padding=padding,
                                   strides=strides, activation="relu")(feat)
    pooled = keras.layers.MaxPool2D((2, 2), (2, 2))(feat)
    return feat, pooled
def up_block(x, skip, filters, kernel_size=(3, 3), padding="same", strides=1):
    """Decoder stage: 2x upsample, concatenate the encoder skip tensor,
    then three Conv2D+ReLU layers."""
    upsampled = keras.layers.UpSampling2D((2, 2))(x)
    feat = keras.layers.Concatenate()([upsampled, skip])
    for _ in range(3):
        feat = keras.layers.Conv2D(filters, kernel_size, padding=padding,
                                   strides=strides, activation="relu")(feat)
    return feat
def bottleneck(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    """Bridge between encoder and decoder: three Conv2D+ReLU layers,
    no pooling or upsampling."""
    feat = x
    for _ in range(3):
        feat = keras.layers.Conv2D(filters, kernel_size, padding=padding,
                                   strides=strides, activation="relu")(feat)
    return feat
# Densely-connected refinement head applied to the decoder output.
def res_block(u3):
# Group 1: 64->32->32 convs on the decoder features.
c1 = keras.layers.Conv2D(64, kernel_size= (3,3), padding="same", strides=1, activation="relu")(u3)
c2 = keras.layers.Conv2D(32, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c1)
c3 = keras.layers.Conv2D(32, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c2)
# Skip: concatenate the block input back onto the group-1 output.
c3 = keras.layers.Concatenate()([u3, c3])
c4 = keras.layers.Conv2D(64, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c3)
c5 = keras.layers.Conv2D(32, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c4)
c6 = keras.layers.Conv2D(32, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c5)
# NOTE(review): c3 already contains u3, so this concat includes u3 twice —
# possibly intended dense connectivity; confirm before changing.
c6 = keras.layers.Concatenate()([u3, c3, c6])
c7 = keras.layers.Conv2D(64, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c6)
c8 = keras.layers.Conv2D(32, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c7)
c9 = keras.layers.Conv2D(16, kernel_size= (3,3), padding="same", strides=1, activation="relu")(c8)
return c9
K.clear_session()
def UNet():
    """Two-branch U-Net: each 128x128x3 input is encoded separately, the
    bottlenecks are concatenated, and the decoder (with skips from the
    second branch's encoder) produces a 128x128x3 sigmoid output.
    """
    f = [16, 32, 64, 128, 256]
    inputs1 = keras.layers.Input((128,128, 3))
    # Fix: the original created a second keras Input for branch 2 further
    # down, orphaning this one; a single Input per branch is used here.
    inputs2 = keras.layers.Input((128,128, 3))

    # Branch 1 encoder.
    p0 = inputs1
    c1, p1 = down_block(p0, f[0])  # 128 -> 64
    c2, p2 = down_block(p1, f[1])  # 64 -> 32
    c3, p3 = down_block(p2, f[2])  # 32 -> 16
    bn1 = bottleneck(p3, f[3])
    print(bn1.shape)

    # Branch 2 encoder; its pre-pool maps feed the decoder skips.
    np0 = inputs2
    nc1, np1 = down_block(np0, f[0])  # 128 -> 64
    nc2, np2 = down_block(np1, f[1])  # 64 -> 32
    nc3, np3 = down_block(np2, f[2])  # 32 -> 16
    bn2 = bottleneck(np3, f[3])
    print(bn2.shape)

    # Merge the two bottlenecks before decoding.
    bn = keras.layers.Concatenate()([bn1, bn2])
    print(bn.shape)

    u1 = up_block(bn, nc3, f[2])  # 16 -> 32
    u2 = up_block(u1, nc2, f[1])  # 32 -> 64
    u3 = up_block(u2, nc1, f[0])  # 64 -> 128
    print(u3.shape)

    # apply resblocks
    res = res_block(u3)
    outputs = keras.layers.Conv2D(3, (1, 1), padding="same", activation="sigmoid")(res)
    model = keras.models.Model([inputs1, inputs2], outputs)
    return model
model = UNet()
# NOTE(review): categorical_crossentropy over a 3-channel sigmoid image
# output is unusual — confirm the intended loss for this target encoding.
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
model.summary()
#Data augmentation to generate new data from the given data at the time of each batch
# construct the training image generator for data augmentation
batch_size = 32
aug = ImageDataGenerator(rotation_range=20)
# train the network
# NOTE(review): fit_generator is deprecated in TF2 (model.fit accepts
# generators directly) — confirm the installed TF version still supports it.
model.fit_generator(aug.flow([x_1, x_2], y, batch_size=batch_size), steps_per_epoch=len(x_1) // batch_size, epochs=100)
# Display an image without axes.
def plot(img):
plt.imshow(img)
plt.axis('off')
plt.show()
# Show the two raw test inputs (pose map and segmentation map).
p1 = r'/content/pos_test/0.jpg'
img1= cv2.imread(p1)
plot(img1)
p2 = r'/content/seg_test/0.jpg'
img2= cv2.imread(p2)
plot(img2)
# Reload through load_image so both are 128x128 float arrays.
img1 = load_image(p1)
img2 = load_image(p2)
print(img1.shape)
print(img2.shape)
# Add the batch dimension the model expects: (1, 128, 128, 3).
img1 = np.expand_dims(img1, axis = 0)
img2 = np.expand_dims(img2, axis = 0)
result = model.predict([img1, img2])
# result = np.resize(result, (128,128,3))
result.shape
# Drop the batch dimension and show the predicted map.
result = np.squeeze(result)
plt.imshow(result)
```
| github_jupyter |
Lambda School Data Science
*Unit 2, Sprint 2, Module 3*
---
# Cross-Validation
## Assignment
- [x] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
- [x] Continue to participate in our Kaggle challenge.
- [x] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
- [x] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
- [x] Commit your notebook to your fork of the GitHub repo.
You won't be able to just copy from the lesson notebook to this assignment.
- Because the lesson was ***regression***, but the assignment is ***classification.***
- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.
So you will have to adapt the example, which is good real-world practice.
1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`
3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)
4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))
## Stretch Goals
### Reading
- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation
- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)
- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation
- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)
- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)
### Doing
- Add your own stretch goals!
- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.
- In addition to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.
- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
### BONUS: Stacking!
Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
```python
import pandas as pd
# Filenames of your submissions you want to ensemble
files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
target = 'status_group'
submissions = (pd.read_csv(file)[[target]] for file in files)
ensemble = pd.concat(submissions, axis='columns')
majority_vote = ensemble.mode(axis='columns')[0]
sample_submission = pd.read_csv('sample_submission.csv')
submission = sample_submission.copy()
submission[target] = majority_vote
submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
```
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'tanzania/train_features.csv'),
pd.read_csv(DATA_PATH+'tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'tanzania/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'tanzania/sample_submission.csv')
# NOTE(review): the block below re-loads the same three frames from a
# hard-coded local path, overwriting the DATA_PATH-based load above --
# presumably a leftover duplicate cell; confirm which load is intended.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# Split train into train & val, stratified on the target so class
# proportions match between the two splits.
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['status_group'],
random_state=42)
def wrangle(X):
    """Apply identical cleaning and feature engineering to train, validate, and test frames."""
    # Work on a copy so the caller's frame is untouched (avoids SettingWithCopyWarning).
    X = X.copy()

    # Latitudes of about -2e-08 are GPS noise outside Tanzania; treat them as zero.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # Zeros in these columns are really missing values: convert each zero to NaN
    # (imputed later) and keep a boolean "_MISSING" indicator column, since the
    # fact that a value is missing may itself be a predictive signal.
    for column in ('longitude', 'latitude', 'construction_year',
                   'gps_height', 'population'):
        X[column] = X[column].replace(0, np.nan)
        X[column + '_MISSING'] = X[column].isnull()

    # quantity_group / payment_type duplicate other columns; recorded_by never
    # varies and id always varies (random), so none of them carry usable signal.
    X = X.drop(columns=['quantity_group', 'payment_type'])
    X = X.drop(columns=['recorded_by', 'id'])

    # Replace the raw timestamp with its year/month/day components.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    for part in ('year', 'month', 'day'):
        X[part + '_recorded'] = getattr(X['date_recorded'].dt, part)
    X = X.drop(columns='date_recorded')

    # Age of the waterpoint when the reading was taken
    # (NaN when construction_year is missing, hence its own indicator).
    X['years'] = X['year_recorded'] - X['construction_year']
    X['years_MISSING'] = X['years'].isnull()
    return X
# Apply the shared cleaning / feature engineering to all three splits.
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all categorical features with cardinality <= 50
categorical_features = cardinality[cardinality <= 50].index.tolist()
# Combine the lists
features = numeric_features + categorical_features
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
import numpy as np
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import RobustScaler
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.impute import SimpleImputer
from scipy.stats import randint
# Pipeline: ordinal-encode the listed categoricals, one-hot encode whatever
# object columns remain, impute missing values, scale, keep the 20 best
# features by ANOVA F-score, then fit a random forest.
# NOTE(review): because OrdinalEncoder runs first, the listed columns become
# numeric and are NOT one-hot encoded by the following OneHotEncoder --
# presumably intentional; confirm.
pipeline = make_pipeline(
ce.OrdinalEncoder(cols=['basin',
'region',
'public_meeting',
'scheme_management',
'permit',
'extraction_type',
'extraction_type_group',
'extraction_type_class',
'management',
'management_group',
'payment',
'water_quality',
'quality_group',
'quantity',
'source',
'source_type',
'source_class',
'waterpoint_type']),
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(),
StandardScaler(),
SelectKBest(f_classif, k=20),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
k = 3
# Cross-validated score of the untuned pipeline.
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
                         scoring='accuracy')
# FIX: scoring='accuracy', so the label now says accuracy -- the original
# printed "MAE", which did not match the metric actually computed.
print(f'Accuracy for {k} folds:', scores)
pipeline.get_params().keys()
# Hyperparameter space for the randomized search.
param_distributions = {
    'simpleimputer__strategy': ['mean', 'median'],
    'selectkbest__k': range(1, len(X_train.columns) + 1),
    # FIX: the original list was [5, 20, 5] -- the duplicated 5 doubled its
    # sampling weight and wasted iterations; use three distinct candidates.
    'randomforestclassifier__min_samples_leaf': [5, 10, 20],
}
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=100,
    cv=5,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
search.fit(X_train, y_train)
# Report the winning configuration and its cross-validated accuracy.
# (The original printed this trio twice and mislabeled the score as MAE.)
print('Best hyperparameters', search.best_params_)
print()
print('Cross-validation accuracy', search.best_score_)
```
| github_jupyter |
```
import numpy as np
# Define Cost function, lambda function, p function, alpha function
# NOTE(review): these four helpers are never called in this cell --
# simulation_infected() below receives la, p and al as plain numbers.
# Presumably leftovers from an earlier parameterisation; confirm before
# removing.
def cost(theta:float) -> float:
return theta
def la(theta:float) -> float:
return 1/theta
def p(theta:float) -> float:
return 1/theta
def al(theta:float)-> float:
return theta
# (Commented-out earlier versions of the loss and its gradient updates,
# kept for reference; the in-loop update_alpha/update_lambda closures in
# simulation_infected supersede them.)
# def L_al_la(la,al,x_n):
# l=2*x_n *(la-al)
# return l
# def L_al_la_d_al(x_n):
# """
# differential of L_al_la in terms of alpha
# :param x_n:
# :return:
# """
# return 2 * x_n * -1
# def L_al_la_d_la(x_n):
# """
# differential of L_al_la in terms of lambda
# :param x_n:
# :return:
# """
# return 2 * x_n
# def update_lambda(la,al,e, x_n):
# beta_1 = 0.023
# la-= e*L_al_la(la,al,x_n)-beta_1*L_al_la_d_la(x_n)
# if la < 0:
# la = 0
# return la
# def update_alpha(la,al,e, x_n):
# beta_2 = 10**8
# al-= e*L_al_la(la,al,x_n)-beta_2*L_al_la_d_al(x_n)
# # alpha cannot be negative
# if al < 0:
# al = 0
# return al
# @ param N: is the number of total population
# @ param total_time: the time that we want to simulate the process (6 month)
# @ param x_0: initial infected patients (greater than 0)
def simulation_infected(la, p, al, N, total_time, x_0):
# initialize time and infected patient
t_0 = 0
# bind variables to initial conditions
x_n = x_0
assert(x_n!=0)
t_n = t_0
# initiate X_n process and corresponding T_n process
X_n = [x_0]
T_n = [t_0]
# converge
conv =0
# loop
# break condition
# @ condition1: when the time exceeds the total time
# @ condition2: when the infected patients go to zero
# @ condition3: when the whole populations are infected! :(
while (t_n<total_time and
x_n != 0 and
x_n < N
):
i = x_n
# q i _ i+1
q_forward_i = la*p*2*x_n*(N-i)/(N*(N-1))
# q i _ i-1
q_backward_i = al*i
# waiting time rate v_i = (q i _ i+1) + (q i _ i-1)
v_i = q_forward_i + q_backward_i
t_i = np.random.exponential(v_i)
# jumping probability to STATE i+1 is (q i _ i+1)/v_i
jump = np.random.binomial(n=1,p=(q_forward_i/v_i))
if (jump ==1):
x_n += 1
else:
x_n -= 1
# increase time
t_n = t_n+t_i
# append Process
X_n.append(x_n)
T_n.append(t_n)
if len(X_n) > 1 and abs(x_n - X_n[-2]) < 1:
conv = 1
break
# update alpha and lambda
e = 0.01
def update_alpha(al):
beta_2 = 10**8
al-= e*v_i-beta_2* (p*2*x_n*(N-i)/(N*(N-1)))
# alpha cannot be negative
if al < 0:
al = 0
return al
def update_lambda(la):
beta_1 = 0.023
la-= e*v_i-beta_1*i
if la < 0:
la = 0
return la
al = update_alpha(al)
la = update_lambda(la)
return X_n,T_n,conv, al,la
# Initial learning rate and parameter guesses for the simulation.
e=0.01
para_alpha = 10**(-8)
para_lamda = 0.01
# Argument order is (la, p, al, N, total_time, x_0):
# lambda=para_lamda, p=0.2, alpha=para_alpha, N=20000 people, 100 time
# units, 10 initially infected.  The returned alpha/lambda are the
# online-updated values.
X_n,_,conv, para_alpha, para_lamda = simulation_infected(para_lamda,0.2,para_alpha,20000,100,10)
if conv ==1:
print('converge to C')
else:
print("couldn't converge")
print("alpha={}, lambda={}".format(para_alpha, para_lamda))
```
| github_jupyter |
```
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
%matplotlib inline
# Hand-picked trapezoid covering the lane in the source (camera) image.
left_top=[585, 456]
left_bottom =[253, 697]
right_top =[1061, 690]
right_bottom =[700, 456]
corners = np.float32([left_top,left_bottom, right_top,right_bottom])
offset = 150 #test the image to estimate a offset
# NOTE(review): `img` is not defined at this point in the cell -- this line
# only works if a previous notebook run left `img` in memory (it is later
# assigned inside the calibration loop below).  Confirm execution order.
img_size = (img.shape[1], img.shape[0])
# Source quadrilateral for the perspective transform.
src = np.float32(
[corners[0],
corners[1],
corners[2],
corners[3]])
#decide a place to place the birdviewed image, get these points by testing an image
dst = np.float32([
[offset, 0],
[offset, img_size[1]],
[img_size[0] - offset, img_size[1]],
[img_size[0] - offset,0]])
# Thresholds for the Sobel-x gradient and the HLS S channel.
grad_thresh=[20,100]
s_thresh=[170,255]
import pickle
#load distortion matrix from camera_cal
with open('./camera_cal/wide_dist_pickle.p',mode='rb') as f:
dist_pickle = pickle.load(f)
mtx = dist_pickle['mtx']
dist = dist_pickle["dist"]
print(mtx)
print(dist)
print('loaded mtx matrix and distortion matrix from undistortion')
import glob
# Chessboard size used for calibration: 9x6 inner corners.
nx = 9
ny = 6
# Object points: the chessboard corners on a flat z=0 grid.
objp = np.zeros((nx*ny,3),np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images
objpoints= []# 3d points in real world space
imgpoints= []# 2d points in image space
#make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
#Step through the list and search for chessboard corners
for frame in images:
img = cv2.imread(frame)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
def get_shresholded_img(image, grad_thresh, s_thresh):
    """Return a binary mask combining an x-gradient threshold and an HLS S-channel threshold.

    Parameters
    ----------
    image : RGB image array.
    grad_thresh : (low, high) bounds for the scaled Sobel-x gradient.
    s_thresh : (low, high) bounds for the HLS saturation channel.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # x-direction gradient accentuates near-vertical lane lines.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1
    # Threshold the HLS saturation channel.
    # FIX: the original converted the *global* `img` here instead of the
    # `image` argument, so the S-channel mask came from whatever image the
    # notebook had loaded last, not from the frame being processed.
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Combine the two binary thresholds (logical OR).
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
def undistort_image(image, objectpoints, imagepoints):
    """Undistort `image` with a camera calibrated from chessboard correspondences.

    NOTE: this re-runs cv2.calibrateCamera on every call, exactly as before;
    for video use the calibration result could be cached.
    """
    size = (image.shape[1], image.shape[0])
    # Calibrate from the collected object/image point pairs.
    _ret, cam_matrix, dist_coeffs, _rvecs, _tvecs = cv2.calibrateCamera(
        objectpoints, imagepoints, size, None, None)
    # Remap the frame onto an undistorted grid.
    return cv2.undistort(image, cam_matrix, dist_coeffs, None, cam_matrix)
def warp_image_to_birdseye_view(image):
    """Warp `image` to a bird's-eye (top-down) view using the module-level src/dst points.

    Returns the warped image and the inverse transform `Minv` for unwarping later.
    """
    size = (image.shape[1], image.shape[0])
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    birdseye = cv2.warpPerspective(image, forward, size, flags=cv2.INTER_LINEAR)
    return birdseye, inverse
def find_lane_lines(warped_binary_image, testing=False):
    """Locate lane-line pixels in a bird's-eye binary image with sliding windows.

    Fits a 2nd-order polynomial to each lane line and computes curvature radii
    (meters) plus the vehicle's lateral deviation from lane center.

    Returns
    -------
    testing=True:
        (left_fit_x, right_fit_x, plot_y, left_fit, right_fit,
         left_curve, right_curve, lane_deviation, output_image)
    testing=False:
        (left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation)
    """
    if testing == True:
        # Create an output image to draw on and visualize the result
        output_image = np.dstack((warped_binary_image, warped_binary_image, warped_binary_image))*255
    # Column histogram of the lower image half; its two peaks are the lane bases.
    histogram = np.sum(warped_binary_image[int(warped_binary_image.shape[0]/2):,:], axis=0)
    # Find the peak of the left and right halves of the histogram
    # FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    midpoint = int(histogram.shape[0]/2)
    left_x_base = np.argmax(histogram[:midpoint])
    right_x_base = np.argmax(histogram[midpoint:]) + midpoint
    # Choose the number of sliding windows
    number_of_windows = 9
    # Set height of windows (FIX: np.int -> int)
    window_height = int(warped_binary_image.shape[0]/number_of_windows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero_pixels = warped_binary_image.nonzero()
    nonzero_y_pixels = np.array(nonzero_pixels[0])
    nonzero_x_pixels = np.array(nonzero_pixels[1])
    # Current positions to be updated for each window
    left_x_current = left_x_base
    right_x_current = right_x_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(number_of_windows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = warped_binary_image.shape[0] - (window+1)*window_height
        win_y_high = warped_binary_image.shape[0] - window*window_height
        win_x_left_low = left_x_current - margin
        win_x_left_high = left_x_current + margin
        win_x_right_low = right_x_current - margin
        win_x_right_high = right_x_current + margin
        if testing == True:
            # Draw the windows on the visualization image
            cv2.rectangle(output_image, (win_x_left_low,win_y_low), (win_x_left_high,win_y_high), (0,255,0), 2)
            cv2.rectangle(output_image, (win_x_right_low,win_y_low), (win_x_right_high,win_y_high), (0,255,0), 2)
        # Identify the nonzero pixels in x and y within the window
        left_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_left_low) & (nonzero_x_pixels < win_x_left_high)).nonzero()[0]
        right_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_right_low) & (nonzero_x_pixels < win_x_right_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(left_inds)
        right_lane_inds.append(right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        # (FIX: np.int -> int in both branches)
        if len(left_inds) > minpix:
            left_x_current = int(np.mean(nonzero_x_pixels[left_inds]))
        if len(right_inds) > minpix:
            right_x_current = int(np.mean(nonzero_x_pixels[right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    left_x = nonzero_x_pixels[left_lane_inds]
    left_y = nonzero_y_pixels[left_lane_inds]
    right_x = nonzero_x_pixels[right_lane_inds]
    right_y = nonzero_y_pixels[right_lane_inds]
    # Fit a second order polynomial to each lane line (x as a function of y).
    left_fit = np.polyfit(left_y, left_x, 2)
    right_fit = np.polyfit(right_y, right_x, 2)
    # Generate x and y values for plotting
    plot_y = np.linspace(0, warped_binary_image.shape[0]-1, warped_binary_image.shape[0] )
    left_fit_x = left_fit[0]*plot_y**2 + left_fit[1]*plot_y + left_fit[2]
    right_fit_x = right_fit[0]*plot_y**2 + right_fit[1]*plot_y + right_fit[2]
    # Get binary warped image size
    image_size = warped_binary_image.shape
    # Evaluate curvature at the bottom of the image (closest to the car).
    y_eval = np.max(plot_y)
    # Pixel-to-meter conversions (assumed 30 m / 720 px and 3.7 m / 700 px).
    y_m_per_pix = 30/720
    x_m_per_pix = 3.7/700
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(left_y*y_m_per_pix, left_x*x_m_per_pix, 2)
    right_fit_cr = np.polyfit(right_y*y_m_per_pix, right_x*x_m_per_pix, 2)
    # Radius of curvature formula for x = f(y).
    left_curve = ((1+(2*left_fit_cr[0]*y_eval*y_m_per_pix+left_fit_cr[1])**2)**1.5)/np.absolute(2*left_fit_cr[0])
    right_curve = ((1+(2*right_fit_cr[0]*y_eval*y_m_per_pix+right_fit_cr[1])**2)**1.5)/np.absolute(2*right_fit_cr[0])
    # Calculate lane deviation from center of lane
    scene_height = image_size[0] * y_m_per_pix
    scene_width = image_size[1] * x_m_per_pix
    # Calculate the intercept points at the bottom of our image
    left_intercept = left_fit_cr[0] * scene_height ** 2 + left_fit_cr[1] * scene_height + left_fit_cr[2]
    right_intercept = right_fit_cr[0] * scene_height ** 2 + right_fit_cr[1] * scene_height + right_fit_cr[2]
    center = (left_intercept + right_intercept) / 2.0
    # Use intercept points to calculate the lane deviation of the vehicle
    lane_deviation = (center - scene_width / 2.0)
    if testing == True:
        # Color the detected lane pixels (left red, right blue).
        output_image[nonzero_y_pixels[left_lane_inds], nonzero_x_pixels[left_lane_inds]] = [255, 0, 0]
        output_image[nonzero_y_pixels[right_lane_inds], nonzero_x_pixels[right_lane_inds]] = [0, 0, 255]
        return left_fit_x, right_fit_x, plot_y, left_fit, right_fit, left_curve, right_curve, lane_deviation, output_image
    else:
        return left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation
def draw_lane_lines(warped_binary_image, undistorted_image, Minv):
    """Paint the detected lane area back onto the undistorted frame and annotate it."""
    # Detect lane-line polynomials, curvature radii and deviation on the warped binary.
    left_fit_x, right_fit_x, ploty, left_radius, right_radius, lane_deviation = \
        find_lane_lines(warped_binary_image)
    # Fresh 3-channel canvas in warped (bird's-eye) space.
    blank = np.zeros_like(warped_binary_image).astype(np.uint8)
    color_warp = np.dstack((blank, blank, blank))
    # Lane boundary points; the right side is reversed so the polygon is
    # traced in a consistent winding order for cv2.fillPoly().
    pts_left = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])
    polygon = np.hstack((pts_left, pts_right))
    # Fill the lane area in green on the warped canvas.
    cv2.fillPoly(color_warp, np.int_([polygon]), (0, 255, 0))
    # Bring the painted lane back to camera perspective and blend it in.
    unwarped = cv2.warpPerspective(
        color_warp, Minv, (undistorted_image.shape[1], undistorted_image.shape[0]))
    annotated = cv2.addWeighted(undistorted_image, 1, unwarped, 0.3, 0)
    # Annotate curvature and lane deviation.
    font = cv2.FONT_HERSHEY_TRIPLEX
    curvature_text = "Curvature: Left = " + str(np.round(left_radius, 2)) + ", Right = " + str(np.round(right_radius, 2))
    cv2.putText(annotated, curvature_text, (30, 60), font, 1, (0,255,0), 2)
    deviation_text = "Lane deviation from center = {:.2f} m".format(lane_deviation)
    cv2.putText(annotated, deviation_text, (30, 90), font, 1, (0,255,0), 2)
    return annotated
def process_image(image):
    """Full pipeline for one frame: undistort -> threshold -> bird's-eye warp -> lane overlay."""
    corrected = undistort_image(image, objpoints, imgpoints)
    binary = get_shresholded_img(corrected, grad_thresh, s_thresh)
    warped, inverse_transform = warp_image_to_birdseye_view(binary)
    return draw_lane_lines(warped, corrected, inverse_transform)
# Sanity-check the full pipeline on a single test image.
image=plt.imread('test_images/test5.jpg')
result=process_image(image)
plt.imshow(result)
image = plt.imread('test_images/test5.jpg')
with open('./camera_cal/wide_dist_pickle.p', mode='rb') as f:
    dist_pickle = pickle.load(f)
mtx = dist_pickle['mtx']
dist = dist_pickle["dist"]
# FIX: the original assigned the undistorted image to `dst`, clobbering the
# global `dst` array of perspective destination points that
# warp_image_to_birdseye_view() reads -- the warp then received an image
# where it expected 4 points.  Use a distinct name for the frame.
undistorted = cv2.undistort(image, mtx, dist, None, mtx)
combined_binary = get_shresholded_img(undistorted, grad_thresh, s_thresh)

def warp_image_to_birdseye_view(image):
    """Perspective-warp `image` to a bird's-eye view using the global src/dst points.

    Returns the warped image and the inverse transform Minv.
    (Re-definition identical to the earlier one, kept from the original cell.)
    """
    image_size = (image.shape[1], image.shape[0])
    # Get perspective transform
    perspectiveTransform = cv2.getPerspectiveTransform(src, dst)
    # Warp perspective
    warped = cv2.warpPerspective(image, perspectiveTransform, image_size, flags=cv2.INTER_LINEAR)
    # Get the destination perspective transform
    Minv = cv2.getPerspectiveTransform(dst, src)
    return warped, Minv

warped, Minv = warp_image_to_birdseye_view(combined_binary)
from moviepy.editor import VideoFileClip
from IPython.display import HTML
!pip install moviepy
video_output = "output_images/project_video.mp4"
clip1 = VideoFileClip("project_video.mp4")
clip1_output = clip1.fl_image(process_image)
%time clip1_output.write_videofile(video_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(video_output))
```
| github_jupyter |
<table style="float:left; border:none">
<tr style="border:none">
<td style="border:none">
<a href="https://bokeh.org/">
<img
src="assets/bokeh-transparent.png"
style="width:50px"
>
</a>
</td>
<td style="border:none">
<h1>Bokeh Tutorial</h1>
</td>
</tr>
</table>
<div style="float:right;"><h2>08. Graph and Network Plots</h2></div>
This chapter will cover how to plot network node/link graphs in Bokeh using NetworkX. For information on creating graph renderers from a low level, see [Visualizing Network Graphs](https://docs.bokeh.org/en/latest/docs/user_guide/graph.html)
```
from bokeh.io import show, output_notebook
from bokeh.plotting import figure
output_notebook()
```
## Plotting from NetworkX
The easiest way to plot network graphs with Bokeh is to use the `from_networkx` function. This function accepts any NetworkX graph and returns a Bokeh `GraphRenderer` that can be added to a plot. The `GraphRenderer` has `node_renderer` and `edge_renderer` properties that contain the Bokeh renderers that draw the nodes and edges, respectively.
The example below shows a Bokeh plot of `nx.desargues_graph()`, setting some of the node and edge properties.
```
import networkx as nx
from bokeh.models import Range1d, Plot
from bokeh.plotting import from_networkx
# Desargues graph: a fixed 20-node example graph.
G = nx.desargues_graph()
# We could use figure here but don't want all the axes and titles
plot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2, 2))
# Create a Bokeh graph from the NetworkX input using nx.spring_layout
graph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))
plot.renderers.append(graph)
# Set some of the default node glyph (Circle) properties
graph.node_renderer.glyph.update(size=20, fill_color="orange")
# Set some edge properties too (dashed lines)
graph.edge_renderer.glyph.line_dash = [2,2]
show(plot)
# Exercise: try a different NetworkX layout, and set some properties on `graph.edge_renderer.glyph`
# and `graph.node_renderer.glyph`
```
## Adding Extra Data Columns.
The `node_renderer` and `edge_renderer` properties of the graph renderer each have a `data_source` that is a standard `ColumnDataSource` that you can add new data to, e.g. to drive a hover tool, or to specify colors for the renderer. The example below demonstrates both.
```
from bokeh.models import HoverTool
from bokeh.palettes import Category20_20
G = nx.desargues_graph() # always 20 nodes
# We could use figure here but don't want all the axes and titles
plot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2, 2))
# Create a Bokeh graph from the NetworkX input using nx.spring_layout
graph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))
plot.renderers.append(graph)
# Add some new columns to the node renderer data source: an index for the
# hover tooltip and one color per node (Category20_20 has exactly 20 colors).
graph.node_renderer.data_source.data['index'] = list(range(len(G)))
graph.node_renderer.data_source.data['colors'] = Category20_20
# Reference the new "colors" column by name for the node fill.
graph.node_renderer.glyph.update(size=20, fill_color="colors")
plot.add_tools(HoverTool(tooltips="index: @index"))
show(plot)
# Exercise: Add your own columns for other node or edge properties e.g. fill_alpha or line_color,
# or to show other fields in a tooltip
```
## Inspection and Selection Policies
Bokeh graph renderers have `inspection_policy` and `selection_policy` properties. These can be used to control how hover inspections highlight the graph, or how selection tools make selections. These properties may be set to any of the inspection policies in `bokeh.graphs`. For instance, if a user hovers over a node, you may wish to highlight all the associated edges as well. This can be accomplished by setting the inspection policy:
graph.inspection_policy = NodesAndLinkedEdges()
as the example below demonstrates.
```
from bokeh.models.graphs import NodesAndLinkedEdges
from bokeh.models import Circle, HoverTool, MultiLine
# Random graph with 15 nodes and 30 edges.
G = nx.gnm_random_graph(15, 30)
# We could use figure here but don't want all the axes and titles
plot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2 ,2))
# Create a Bokeh graph from the NetworkX input using nx.spring_layout
graph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))
plot.renderers.append(graph)
# Blue circles for nodes, and light grey lines for edges
graph.node_renderer.glyph = Circle(size=25, fill_color='#2b83ba')
graph.edge_renderer.glyph = MultiLine(line_color="#cccccc", line_alpha=0.8, line_width=2)
# green hover for both nodes and edges
graph.node_renderer.hover_glyph = Circle(size=25, fill_color='#abdda4')
graph.edge_renderer.hover_glyph = MultiLine(line_color='#abdda4', line_width=4)
# When we hover over nodes, highlight adjacent edges too
graph.inspection_policy = NodesAndLinkedEdges()
# tooltips=None: the hover tool only highlights, it shows no tooltip box.
plot.add_tools(HoverTool(tooltips=None))
show(plot)
# Exercise: try a different inspection (or selection) policy like NodesOnly or EdgesAndLinkedNodes
```
# Next Section
Click on this link to go to the next notebook: [09 - Geographic Plots](09%20-%20Geographic%20Plots.ipynb).
To go back to the overview, click [here](00%20-%20Introduction%20and%20Setup.ipynb).
| github_jupyter |
```
from pathlib import Path
import os
import shlex
import shutil
import subprocess
import pandas as pd
# (column_name, 1-based column position) pairs for the FoldX Stability
# output table; the inline comment notes the FoldX name for the first term.
names_rows_stability = [
['dg', 1], # totalEnergy
['backbone_hbond', 2],
['sidechain_hbond', 3],
['van_der_waals', 4],
['electrostatics', 5],
['solvation_polar', 6],
['solvation_hydrophobic', 7],
['van_der_waals_clashes', 8],
['entropy_sidechain', 9],
['entropy_mainchain', 10],
['sloop_entropy', 11],
['mloop_entropy', 12],
['cis_bond', 13],
['torsional_clash', 14],
['backbone_clash', 15],
['helix_dipole', 16],
['water_bridge', 17],
['disulfide', 18],
['electrostatic_kon', 19],
['partial_covalent_bonds', 20],
['energy_ionisation', 21],
['entropy_complex', 22],
['number_of_residues', 23],
['interface_residues', 24],
['interface_residues_clashing', 25],
['interface_residues_vdw_clashing', 26],
['interface_residues_bb_clashing', 27]
]
# AnalyseComplex output: two intraclash columns first, then the stability
# terms shifted 4 positions to the right.
names_rows_stability_complex = ([
['intraclashes_energy_1', 3],
['intraclashes_energy_2', 4],
] + [[x[0], x[1] + 4] for x in names_rows_stability])
# Just the column names (first element of each pair).
names_stability_complex = list(next(zip(*names_rows_stability_complex)))
# Wildtype / mutant variants: suffix every energy term, but keep
# 'number_of_residues' unsuffixed and name the 4 interface counts explicitly
# (hence the [:-5] slice).
names_stability_complex_wt = [name + '_wt'
for name in names_stability_complex[:-5]] + \
['number_of_residues', 'interface_residues_wt', 'interface_residues_clashing_wt',
'interface_residues_vdw_clashing_wt', 'interface_residues_bb_clashing_wt']
names_stability_complex_mut = [name + '_mut'
for name in names_stability_complex[:-5]] + \
['number_of_residues', 'interface_residues_mut', 'interface_residues_clashing_mut',
'interface_residues_vdw_clashing_mut', 'interface_residues_bb_clashing_mut']
def _export_foldxpath(path_to_export):
# export PATH=$PATH:/path/to/folder
if str(path_to_export) not in os.environ["PATH"]:
os.environ["PATH"] += ":" + str(path_to_export)
print("foldx path exported")
else:
print("foldx bin folder already in PATH")
def _rotabase_symlink(rotabase_path):
# rotabase symlink
sym_rotabase = Path("rotabase.txt")
if not sym_rotabase.is_symlink():
sym_rotabase.symlink_to(rotabase_path)
print("Symlink to rotabase.txt create on working dir")
else:
print("rotabase.txt symlink already exist on working dir")
def read_analyse_complex(output_file):
    """Parse a FoldX AnalyseComplex `.fxout` file into a flat list of values.

    The first 8 lines are the FoldX banner; the actual table is
    tab-separated with exactly one data row.
    """
    frame = pd.read_csv(output_file, sep='\t', index_col=False, skiprows=8)
    # Normalise headers: lowercase, spaces -> underscores.
    frame = frame.rename(columns=lambda name: name.lower().replace(' ', '_'))
    # AnalyseComplex output holds exactly one data row.
    assert frame.shape[0] == 1
    # Drop the identifier columns; return the remaining values in order.
    return frame.drop(pd.Index(['pdb', 'group1', 'group2']), axis=1).iloc[0].tolist()
def convert_features_to_differences(df, keep_mut=False):
    """Convert `_wt` and `_mut` columns into `_wt` and `_change` columns.

    Create a new set of features (ending in `_change`) that describe the difference between values
    of the wildtype (features ending in `_wt`) and mutant (features ending in `_mut`) features.
    If `keep_mut` is `False`, removes all mutant features (features ending in `_mut`).
    """
    column_list = []
    # FIX: DataFrame.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent and behaves identically.
    for column_name, column in df.items():
        if ('_mut' in column_name and column_name.replace('_mut', '_wt') in df.columns and
                df[column_name].dtype != object):
            if keep_mut:
                column_list.append(column)
            # Difference feature: mutant minus wildtype.
            new_column = column - df[column_name.replace('_mut', '_wt')]
            if 'secondary_structure' in column_name:
                # For secondary-structure features only record whether it changed.
                new_column = new_column.apply(lambda x: 1 if x else 0)
            new_column.name = column_name.replace('_mut', '_change')
            column_list.append(new_column)
        else:
            # Non-mutant (or object-typed) columns pass through unchanged.
            column_list.append(column)
    return pd.concat(column_list, axis=1)
# Path to the folder that contains the FoldX executable.
# NOTE(review): the name says "exe" but the value looks like a directory;
# _export_foldxpath() appends it to $PATH, so a directory is what is
# actually used here.
foldx_exe = "/mnt/d/Python_projects/AbPred/libs/foldx5Linux64/"
# Thin wrapper around the FoldX command-line tool.
class FoldX:
# NOTE(review): foldx_dir and verbose are accepted but never used -- the
# module-level foldx_exe is used instead, and self.verbose is left unset
# (the commented-out line below), so the first `_run` definition's
# `self.verbose` reference would fail if it were ever reached.
def __init__(self, foldx_dir=None, verbose=True):
self._tempdir = Path(foldx_exe)
_export_foldxpath(self._tempdir)
#self.verbose = verbose
# Name of the PDB being worked on; set by repair_pdb().
self.pdbfile = None
def _run(self, cmd, **options):
"""Run a FoldX command, streaming its stdout line by line.

NOTE(review): this method is DEAD CODE -- it is shadowed by the second
`_run` definition further down the class, which Python keeps.  As written
it also looks inconsistent: `cmd.extend(...)` treats `cmd` as a list, but
`shlex.split(cmd)` requires a string, and `self.verbose` is never assigned
in __init__.  Confirm before deleting.

(The original docstring here was a verbatim paste of the full `foldx
--help` output -- FoldX 4 banner, the list of FoldX commands such as
AlaScan / AnalyseComplex / BuildModel / Pssm / RepairPDB / Stability, and
every generic, PDB-output, force-field, entropy, water/ion, complex and
algorithm option with its default.  It has been trimmed here; run
`foldx --help` for the authoritative, current option reference.)
"""
# Append any keyword options as `--key value` CLI arguments.
if options:
for key, value in options.items():
cmd.extend(["--" + key, value])
p = subprocess.Popen(shlex.split(cmd), universal_newlines=True, shell=False, stdout=subprocess.PIPE)
# Echo FoldX output until the process exits.
while True:
out = p.stdout.readline()
if not out and p.poll() is not None:
break
if self.verbose and out:
print(out.splitlines()[0])
def _run(self,cmd):
# call external program on `filename`
fout = open("stdout_{}.txt".format(self.pdbfile[:-4]),"w")
subprocess.check_call(shlex.split(cmd),stdout=fout)
fout.close()
def repair_pdb(self, pdbfile):
    """Run the FoldX ``RepairPDB`` command on *pdbfile*.

    Records the bare file name on ``self.pdbfile`` so ``_run`` can name
    its log file after the structure being repaired.
    """
    target = Path(pdbfile).absolute()
    self.pdbfile = target.name
    self._run("foldx --command=RepairPDB --pdb={}".format(self.pdbfile))
def analyse_complex(self, pdb_file, partners):
    """Run FoldX ``AnalyseComplex`` and return the parsed interaction table.

    Args:
        pdb_file: path to the complex structure.
        partners (str): the two chain groups joined by ``_``, e.g. ``"A_B"``.

    Returns:
        Parsed contents of the ``Interaction_<name>_AC.fxout`` file, as
        produced by ``read_analyse_complex``.
    """
    pdb = Path(pdb_file).absolute()
    stem = pdb.name[:-4]
    chain_a = partners.split('_')[0]
    chain_b = partners.split('_')[1]
    cmd = ("foldx --command=AnalyseComplex --pdb={} ".format(pdb.name)
           + "--analyseComplexChains={},{} ".format(chain_a, chain_b))
    self._run(cmd)
    # FoldX drops its report next to the input structure.
    fxout = pdb.parent.joinpath('Interaction_%s_AC.fxout' % stem)
    return read_analyse_complex(fxout)
def point_mutations(self, pdb_file, partners, to_mutate, mutations):
    """Run FoldX ``Pssm`` for point mutations at one site.

    Parameters
    ----------
    to_mutate:
        Mutation site specified in the following format:
        {mutation.residue_wt}{chain_id}{residue_id}
    mutations:
        Mutant residues
    """
    pdb = Path(pdb_file).absolute()
    stem = pdb.name[:-4]
    out_name = stem + '_' + to_mutate + mutations
    chain_a = partners.split('_')[0]
    chain_b = partners.split('_')[1]
    cmd = ("foldx --command=Pssm --pdb={} ".format(pdb.name)
           + "--analyseComplexChains={},{} ".format(chain_a, chain_b)
           + "--positions={}a ".format(to_mutate)
           + "--aminoacids={} ".format(mutations)
           + '--output-file={}'.format(out_name))
    self._run(cmd)
    # Keep descriptively named local copies of the FoldX output models
    # (wild-type and mutant) so later runs do not overwrite them.
    tag = to_mutate + mutations
    for produced, renamed in (
            ('WT_{}_1.pdb'.format(stem), '{}-{}-wt.pdb'.format(stem, tag)),
            ('{}_1.pdb'.format(stem), '{}-{}-mut.pdb'.format(stem, tag))):
        shutil.copy(Path(produced), Path(renamed))
def build_model(self, pdb_file, foldx_mutation):
    """Run FoldX ``BuildModel`` for *foldx_mutation* and keep renamed
    copies of the wild-type and mutant output models."""
    pdb = Path(pdb_file).absolute()
    stem = pdb.name[:-4]
    mutation_file = self._get_mutation_file(pdb_file, foldx_mutation)
    cmd = ("foldx --command=BuildModel --pdb='{}' ".format(pdb.name)
           + "--mutant-file='{}'".format(mutation_file))
    self._run(cmd)
    # Keep descriptively named local copies of the FoldX output models.
    shutil.copy(Path('WT_{}_1.pdb'.format(stem)),
                Path('{}-{}-wt.pdb'.format(stem, foldx_mutation)))
    shutil.copy(Path('{}_1.pdb'.format(stem)),
                Path('{}-{}-mut.pdb'.format(stem, foldx_mutation)))
def _get_mutation_file(self, pdb_file, foldx_mutation):
"""
Parameters
----------
foldx_mutation:
Mutation specified in the following format:
{mutation.residue_wt}{chain_id}{residue_id}{mutation.residue_mut}
"""
pdb = Path(pdb_file).absolute()
mutation_file = Path('individual_list_{}_{}.txt'.format(pdb.name[:-4], foldx_mutation))
mutation_file.write_text('{};\n'.format(foldx_mutation))
return mutation_file
```
```
# Collect the mutant models produced by earlier BuildModel runs.
PDBS_DIR = Path("out_models/")
pdbs_paths = list(PDBS_DIR.glob("*mut.pdb"))
# IPython help lookup — notebook-only syntax, not valid plain Python.
subprocess.DEVNULL?
### form 1
# Form 1: launch several RepairPDB runs concurrently with raw Popen and
# wait for them all. NOTE(review): `p` is reused as both the loop counter
# and the Popen handle, and the log file is closed before the child
# finishes writing to it — confirm this scratch cell is intentional.
procs = []
for p in range(2):
pdb = Path("VRC01.pdb").absolute()
command = ("foldx --command=RepairPDB --pdb={}".format(pdb.name))
fout = open("stdout_%d.txt" % p,'w')
p = subprocess.Popen(shlex.split(command), stdout=fout)
fout.close()
procs.append(p)
for p in procs:
p.wait()
# Stray notebook expression; `f` is not defined in this cell.
f.name
# form 2
# Form 2: fan the RepairPDB runs out over a process pool.
import os
import concurrent.futures
def run(command):
... # call external program on `filename`
# NOTE(review): `f` below is a global leaked from the executor loop, not
# a parameter — the log-file name depends on outer state; confirm.
command = shlex.split(command)
fout = open("stdout_{}.txt".format(f.name),"w")
subprocess.check_call(command,stdout=fout)
fout.close()
def repair_pdb(pdbfile):
# Build and execute the FoldX RepairPDB command for one PDB file.
pdb = Path(pdbfile).absolute()
command = ("foldx --command=RepairPDB --pdb={}".format(pdb.name))
run(command)
# populate files
pdbs_paths = list(PDBS_DIR.glob("*mut.pdb"))[:10]
CWD = os.getcwd()
try:
os.chdir(PDBS_DIR)
# start threads
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
future_to_file = dict((executor.submit(repair_pdb, f), f) for f in pdbs_paths)
for future in concurrent.futures.as_completed(future_to_file):
f = future_to_file[future]
if future.exception() is not None:
print('%r generated an exception: %s' % (f, future.exception()))
# run() doesn't return anything so `future.result()` is always `None`
finally:
# Always restore the original working directory.
os.chdir(CWD)
# Dangling notebook fragment (attribute completion left in the cell).
foldx.
# form 2 with foldx class
# Same process-pool fan-out as above, but dispatching to the FoldX
# wrapper class instead of the module-level helper.
pdbs_paths = list(PDBS_DIR.glob("*mut.pdb"))[:10]
CWD = os.getcwd()
try:
os.chdir(PDBS_DIR)
# start threads
foldx = FoldX()
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
future_to_file = dict((executor.submit(foldx.repair_pdb, f), f) for f in pdbs_paths)
for future in concurrent.futures.as_completed(future_to_file):
f = future_to_file[future]
if future.exception() is not None:
print('%r generated an exception: %s' % (f, future.exception()))
# run() doesn't return anything so `future.result()` is always `None`
finally:
os.chdir(CWD)
# IPython help lookup — notebook-only syntax.
concurrent.futures.as_completed?
```
# Testing foldx class
```
# Smoke-test the FoldX wrapper on a single structure.
foldx = FoldX(verbose=True)
# NOTE(review): repair_pdb's parameter is named `pdbfile` in the class
# above, so the `pdb_file=` keyword here would raise a TypeError — confirm.
foldx.repair_pdb(pdb_file="VRC01.pdb")
pdbs_to_repair = PDBS_DIR.glob("*.pdb")
try:
os.chdir(PDBS_DIR)
#create symlink to rotabase.txt
rotabase_symlink(ROTABASE)
# Stray expression left over from interactive exploration.
(PDBS_DIR.glob("*.pdb"))
for pdb in pdbs_to_repair:
options = {"command":"RepairPDB","repair_Interface":"ONLY","pdb":str(pdb.name)}
FoldX(exe="foldx",verbose=True,**options).run()
finally:
os.chdir(CWD)
# IPython help lookup — notebook-only syntax.
subprocess.Popen?
```
| github_jupyter |
# Quantum Katas and Tutorials as Jupyter Notebooks
To run the katas and tutorials online, make sure you're viewing this file on Binder (if not, use [this link](https://mybinder.org/v2/gh/Microsoft/QuantumKatas/main?urlpath=/notebooks/index.ipynb)).
To run the katas and tutorials locally, follow [these installation instructions](https://github.com/microsoft/QuantumKatas/blob/main/README.md#kata-locally).
> While running the Katas online is the easiest option to get started, if you want to save your progress and enjoy better performance, we recommend you to choose the local option.
## Learning path
Here is the learning path we suggest you to follow if you are starting to learn quantum computing and quantum programming. Once you're comfortable with the basics, you're welcome to jump ahead to the topics that pique your interest!
#### Quantum Computing Concepts: Qubits and Gates
* **[Complex arithmetic (tutorial)](./tutorials/ComplexArithmetic/ComplexArithmetic.ipynb)**.
Learn about complex numbers and the mathematics required to work with quantum computing.
* **[Linear algebra (tutorial)](./tutorials/LinearAlgebra/LinearAlgebra.ipynb)**.
Learn about vectors and matrices used to represent quantum states and quantum operations.
* **[The qubit (tutorial)](./tutorials/Qubit/Qubit.ipynb)**.
Learn what a qubit is.
* **[Single-qubit gates (tutorial)](./tutorials/SingleQubitGates/SingleQubitGates.ipynb)**.
Learn what a quantum gate is and about the most common single-qubit gates.
* **[Basic quantum computing gates](./BasicGates/BasicGates.ipynb)**.
Learn to apply the most common gates used in quantum computing.
* **[Multi-qubit systems (tutorial)](./tutorials/MultiQubitSystems/MultiQubitSystems.ipynb)**.
Learn to represent multi-qubit systems.
* **[Multi-qubit gates (tutorial)](./tutorials/MultiQubitGates/MultiQubitGates.ipynb)**.
Learn about the most common multi-qubit gates.
* **[Superposition](./Superposition/Superposition.ipynb)**.
Learn to prepare superposition states.
#### Quantum Computing Concepts: Measurements
* **[Single-qubit measurements (tutorial)](./tutorials/SingleQubitSystemMeasurements/SingleQubitSystemMeasurements.ipynb)**.
Learn what quantum measurement is and how to use it for single-qubit systems.
* **[Multi-qubit measurements (tutorial)](./tutorials/MultiQubitSystemMeasurements/MultiQubitSystemMeasurements.ipynb)**.
Learn to use measurements for multi-qubit systems.
* **[Measurements](./Measurements/Measurements.ipynb)**.
Learn to distinguish quantum states using measurements.
* **[Distinguish unitaries](./DistinguishUnitaries/DistinguishUnitaries.ipynb)**\*.
Learn to distinguish unitaries by designing and performing experiments with them.
* **[Joint measurements](./JointMeasurements/JointMeasurements.ipynb)**\*.
Learn about using joint (parity) measurements to distinguish quantum states and to perform state transformations.
#### Q\# and Microsoft Quantum Development Kit Tools
* **[Visualization tools (tutorial)](./tutorials/VisualizationTools/VisualizationTools.ipynb)**.
Learn to use the various tools for visualizing elements of Q\# programs.
#### Simple Algorithms
* **[Random number generation (tutorial)](./tutorials/RandomNumberGeneration/RandomNumberGenerationTutorial.ipynb)**.
Learn to generate random numbers using the principles of quantum computing.
* **[Teleportation](./Teleportation/Teleportation.ipynb)**.
Implement standard teleportation protocol and its variations.
* **[Superdense coding](./SuperdenseCoding/SuperdenseCoding.ipynb)**.
Implement the superdense coding protocol.
#### Quantum Oracles and Simple Oracle Algorithms
* **[Quantum oracles (tutorial)](./tutorials/Oracles/Oracles.ipynb)**.
Learn to implement classical functions as equivalent quantum oracles.
* **[Exploring Deutsch and Deutsch–Jozsa algorithms (tutorial)](./tutorials/ExploringDeutschJozsaAlgorithm/DeutschJozsaAlgorithmTutorial_P1.ipynb)**.
Learn to implement classical functions and equivalent quantum oracles,
and compare the quantum solution to the Deutsch–Jozsa problem to a classical one.
* **[Deutsch–Jozsa algorithm](./DeutschJozsaAlgorithm/DeutschJozsaAlgorithm.ipynb)**.
Learn about quantum oracles which implement classical functions, and implement Bernstein–Vazirani and Deutsch–Jozsa algorithms.
#### Grover's search algorithm
* **[Implementing Grover's algorithm](./GroversAlgorithm/GroversAlgorithm.ipynb)**.
Learn about Grover's search algorithm and how to write quantum oracles to use with it.
* **[Exploring Grover's search algorithm (tutorial)](./tutorials/ExploringGroversAlgorithm/ExploringGroversAlgorithmTutorial.ipynb)**.
Learn more about Grover's search algorithm, picking up where the [Grover's algorithm kata](./GroversAlgorithm/GroversAlgorithm.ipynb) left off.
* **[Solving SAT problems using Grover's algorithm](./SolveSATWithGrover/SolveSATWithGrover.ipynb)**.
Explore Grover's search algorithm, using SAT problems as an example.
Learn to implement quantum oracles based on the problem description instead of a hard-coded answer.
Use Grover's algorithm to solve problems with an unknown number of solutions.
* **[Solving graph coloring problems using Grover's algorithm](./GraphColoring/GraphColoring.ipynb)**.
Continue the exploration of Grover's search algorithm, using graph coloring problems as an example.
* **[Solving bounded knapsack problem using Grover's algorithm](./BoundedKnapsack/BoundedKnapsack.ipynb)**.
Learn how to solve the variants of the knapsack problem with Grover's search.
#### Tools and libraries/Building up to Shor's algorithm
* **[Quantum Fourier transform](./QFT/QFT.ipynb)**.
Learn to implement quantum Fourier transform and to use it to perform simple state transformations.
* **[Phase estimation](./PhaseEstimation/PhaseEstimation.ipynb)**.
Learn about phase estimation algorithms.
#### Entanglement games
* **[CHSH game](./CHSHGame/CHSHGame.ipynb)**.
* **[GHZ game](./GHZGame/GHZGame.ipynb)**.
* **[Mermin-Peres magic square game](./MagicSquareGame/MagicSquareGame.ipynb)**.
#### Reversible computing
* **[Truth tables](./TruthTables/TruthTables.ipynb)**.
Learn to represent and manipulate Boolean functions as truth tables and to implement them as quantum operations.
* **[Ripple-carry adder](./RippleCarryAdder/RippleCarryAdder.ipynb)**.
Build a ripple-carry adder on a quantum computer.
#### Miscellaneous
* **[BB84 protocol](./KeyDistribution_BB84/KeyDistribution_BB84.ipynb)**.
Implement the BB84 key distribution algorithm.
* **[Bit-flip error correcting code](./QEC_BitFlipCode/QEC_BitFlipCode.ipynb)**.
Learn about a 3-qubit error correcting code for protecting against bit-flip errors.
* **[Unitary patterns](./UnitaryPatterns/UnitaryPatterns.ipynb)**.
Learn to implement unitaries with matrices that follow certain patterns of zero and non-zero elements.
* **[Quantum classification (tutorial)](./tutorials/QuantumClassification/ExploringQuantumClassificationLibrary.ipynb)**.
Learn about circuit-centric classifiers and the quantum machine learning library included in the QDK.
For a full list of Quantum Katas available as Q# projects instead of Jupyter Notebooks, see the [QuantumKatas repository](https://github.com/Microsoft/QuantumKatas#learning-path).
## Getting Started with Kata Notebooks and Tutorials
Each kata notebook presents the tasks of the respective kata (Q# project) in Jupyter Notebook format. This makes getting started with the katas a lot easier - you don't need to install anything locally to try them out!
Notebook tutorials are designed with Notebook format in mind - in addition to programming exercises they include a lot of theoretical explanations and code samples for you to learn from.
Make sure you're viewing this file when running Jupyter notebooks on your machine or on Binder (for running on Binder, use [this link](https://mybinder.org/v2/gh/Microsoft/QuantumKatas/main?urlpath=/notebooks/index.ipynb)). From here you can navigate to the individual kata or tutorial notebooks using the links above.
* Each tutorial or kata notebook contains a sequence of tasks on the topic, progressing from trivial to challenging.
* Each task is defined in a separate code cell, preceded by the description of the task in a Markdown cell.
Your goal is to fill in the blanks in the code (marked with `// ...` comments) with some Q# code that solves the task.
* To verify your solution, run the code cell using Ctrl + Enter (or ⌘ + Enter on macOS). This will invoke the test covering the task and let you know whether it passes or fails, and if it fails, what the error is.
* You can find pointers to reference materials you might need to solve the tasks, both on quantum computing and on Q#, either in the beginning of the tutorial or the kata or next to the task to which they are relevant.
* You can find reference solutions in `ReferenceImplementation.qs` files of the corresponding katas or tutorials.
* A lot of katas and tutorials have *workbooks* - detailed explanations of task solutions. Feel free to look them up if you're stuck!
| github_jupyter |
# Classifying OUV using NGram features and MLP
## Imports
```
import sys
sys.executable
from argparse import Namespace
from collections import Counter
import json
import os
import re
import string
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm.notebook import tqdm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from scipy.special import softmax
import pickle
import matplotlib.pyplot as plt
import torch.autograd.profiler as profiler
import torchtext
from torchtext.data import get_tokenizer
# spaCy-backed tokenizer from torchtext (requires spaCy to be installed).
tokenizer = get_tokenizer('spacy')
print("PyTorch version {}".format(torch.__version__))
print("GPU-enabled installation? {}".format(torch.cuda.is_available()))
# Prefer the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print(device)
```
## Data Vectorization Classes
### The Vocabulary
```
class Vocabulary(object):
    """Bidirectional token<->index mapping with optional UNK handling."""

    def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
        """
        Args:
            token_to_idx (dict): a pre-existing map of tokens to indices
            add_unk (bool): whether to register the UNK token
            unk_token (str): the UNK token to add into the Vocabulary
        """
        self._token_to_idx = {} if token_to_idx is None else token_to_idx
        self._idx_to_token = {i: tok for tok, i in self._token_to_idx.items()}
        self._add_unk = add_unk
        self._unk_token = unk_token
        # -1 means "no UNK registered"; lookup_token then raises KeyError
        # for unknown tokens instead of falling back.
        self.unk_index = self.add_token(unk_token) if add_unk else -1

    def to_serializable(self):
        """Return a plain-dict representation suitable for json.dump."""
        return {'token_to_idx': self._token_to_idx,
                'add_unk': self._add_unk,
                'unk_token': self._unk_token}

    @classmethod
    def from_serializable(cls, contents):
        """Rebuild a Vocabulary from the output of ``to_serializable``."""
        return cls(**contents)

    def add_token(self, token):
        """Register *token* (idempotent) and return its index."""
        try:
            return self._token_to_idx[token]
        except KeyError:
            index = len(self._token_to_idx)
            self._token_to_idx[token] = index
            self._idx_to_token[index] = token
            return index

    def add_many(self, tokens):
        """Register every token in *tokens* and return their indices.

        Args:
            tokens (list): a list of string tokens
        Returns:
            indices (list): a list of indices corresponding to the tokens
        """
        return [self.add_token(tok) for tok in tokens]

    def lookup_token(self, token):
        """Return the index for *token*, or the UNK index if unknown.

        Notes:
            `unk_index` needs to be >=0 (UNK added to the Vocabulary) for
            the fallback; otherwise an unknown token raises KeyError.
        """
        if self.unk_index < 0:
            return self._token_to_idx[token]
        return self._token_to_idx.get(token, self.unk_index)

    def lookup_index(self, index):
        """Return the token stored at *index*.

        Raises:
            KeyError: if the index is not in the Vocabulary
        """
        if index not in self._idx_to_token:
            raise KeyError("the index (%d) is not in the Vocabulary" % index)
        return self._idx_to_token[index]

    def __str__(self):
        return "<Vocabulary(size=%d)>" % len(self)

    def __len__(self):
        return len(self._token_to_idx)
"""### The Vectorizer"""
def sparse_to_tensor(M, device=None):
    """Convert a SciPy sparse matrix to a dense torch tensor.

    Args:
        M: any SciPy sparse matrix.
        device: target device; defaults to the global ``args.device`` to
            stay backward compatible with the original call sites.

    Returns:
        Dense ``torch.FloatTensor`` of shape ``M.shape`` on *device*.
    """
    if device is None:
        device = args.device  # original behaviour: implicit global
    coo = M.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
    values = torch.from_numpy(coo.data)
    # torch.sparse_coo_tensor replaces the legacy torch.sparse.FloatTensor
    # constructor; the dense result is identical.
    sparse = torch.sparse_coo_tensor(indices, values, torch.Size(coo.shape))
    return sparse.to_dense().to(device)
```
### The Vectorizer
```
def ngrams_iterator(token_list, ngrams):
    """Yield the given tokens, then their n-grams for n = 2..ngrams.

    Arguments:
        token_list: A list of tokens
        ngrams: the maximum n-gram order.

    Examples:
        >>> token_list = ['here', 'we', 'are']
        >>> list(ngrams_iterator(token_list, 2))
        ['here', 'we', 'are', 'here we', 'we are']
    """
    def _get_ngrams(n):
        # zip over n staggered views of the list -> all length-n windows
        return zip(*[token_list[i:] for i in range(n)])

    # Unigrams first, then each higher order in turn. (The original
    # docstring example wrongly showed the orders interleaved.)
    for x in token_list:
        yield x
    for n in range(2, ngrams + 1):
        for x in _get_ngrams(n):
            yield ' '.join(x)
# Vectorization parameters
# Range (inclusive) of n-gram sizes for tokenizing text.
NGRAM_RANGE = (1, 2)
# Limit on the number of features. We use the top 20K features.
# NOTE(review): TOP_K is defined but never passed to the TfidfVectorizer
# in OuvVectorizer.from_dataframe — confirm whether a max_features limit
# was intended.
TOP_K = 20000
# Whether text should be split into word or character n-grams.
# One of 'word', 'char'.
TOKEN_MODE = 'word'
# Minimum document/corpus frequency below which a token will be discarded.
MIN_DOCUMENT_FREQUENCY = 2
def sparse_to_tensor(M, device=None):
    """Convert a SciPy sparse matrix to a dense torch tensor.

    (Duplicate of the definition above, kept because the notebook redefines
    it; this later definition is the one in effect at runtime.)

    Args:
        M: any SciPy sparse matrix.
        device: target device; defaults to the global ``args.device`` for
            backward compatibility.

    Returns:
        Dense ``torch.FloatTensor`` of shape ``M.shape`` on *device*.
    """
    if device is None:
        device = args.device  # original behaviour: implicit global
    coo = M.tocoo().astype(np.float32)
    idx = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
    vals = torch.from_numpy(coo.data)
    # Modern constructor; dense result identical to torch.sparse.FloatTensor.
    return torch.sparse_coo_tensor(idx, vals, torch.Size(coo.shape)).to_dense().to(device)
class OuvVectorizer(object):
    """ The Vectorizer which coordinates the Vocabularies and puts them to use"""

    def __init__(self, ouv_vocab, ngrams, vectorizer):
        """
        Args:
            ouv_vocab (Vocabulary): maps words to integers
            ngrams (int): maximum n-gram order used to build the vocabulary
            vectorizer: a fitted sklearn TfidfVectorizer
        """
        self.ouv_vocab = ouv_vocab
        self.ngrams = ngrams
        self.vectorizer = vectorizer

    def vectorize(self, data):
        """Create a tf_idf vector for the ouv data

        Args:
            data (str): the ouv description data
        Returns:
            tf_idf: dense tf-idf encoding of the single document
        Notes:
            Relies on the module-level ``sparse_to_tensor`` and therefore
            on the global ``args.device``.
        """
        # transform() expects an iterable of documents, hence the wrapping.
        data = [data]
        tf_idf = self.vectorizer.transform(data)
        return sparse_to_tensor(tf_idf)[0]

    @classmethod
    def from_dataframe(cls, ouv_df, ngrams, cutoff=5):
        """Instantiate the vectorizer from the dataset dataframe

        Args:
            ouv_df (pandas.DataFrame): the ouv dataset
            ngrams (int): the maximum ngram value (used for the Vocabulary
                only — see the review note below)
            cutoff (int): the parameter for frequency-based filtering
        Returns:
            an instance of the OuvVectorizer
        """
        ouv_vocab = Vocabulary(add_unk=True)
        corpus=[]
        # Add top words if count > provided count
        word_counts = Counter()
        for data in ouv_df.data:
            corpus.append(data)
            for word in ngrams_iterator(data.split(' '),ngrams=ngrams):
                if word not in string.punctuation:
                    word_counts[word] += 1
        for word, count in word_counts.items():
            if count > cutoff:
                ouv_vocab.add_token(word)
        # Create keyword arguments to pass to the 'tf-idf' vectorizer.
        # NOTE(review): the TfidfVectorizer uses the module-level
        # NGRAM_RANGE rather than the `ngrams` argument, and TOP_K is
        # never applied — confirm this divergence from the vocabulary
        # build is intended.
        kwargs = {
                'ngram_range': NGRAM_RANGE,  # Use 1-grams + 2-grams.
                'dtype': 'int32',
                'strip_accents': 'unicode',
                'decode_error': 'replace',
                'analyzer': TOKEN_MODE,  # Split text into word tokens.
                'min_df': MIN_DOCUMENT_FREQUENCY,
        }
        vectorizer = TfidfVectorizer(**kwargs)
        # Learn vocabulary from training texts and vectorize training texts.
        # (The transformed matrix is discarded; only the fit side effect
        # on `vectorizer` matters here.)
        vectorizer.fit_transform(corpus).astype('float32')
        return cls(ouv_vocab, ngrams, vectorizer)

    @classmethod
    def from_serializable(cls, contents, ngrams, vectorizer):
        """Instantiate a OuvVectorizer from a serializable dictionary

        Args:
            contents (dict): the serializable dictionary
            ngrams (int): the maximum ngram value
            vectorizer: a fitted TfidfVectorizer; it must be re-supplied
                because `to_serializable` only saves the vocabulary
        Returns:
            an instance of the OuvVectorizer class
        """
        ouv_vocab = Vocabulary.from_serializable(contents['ouv_vocab'])
        return cls(ouv_vocab=ouv_vocab, ngrams=ngrams, vectorizer = vectorizer)

    def to_serializable(self):
        """Create the serializable dictionary for caching

        Returns:
            contents (dict): the serializable dictionary (vocabulary only —
            the fitted TfidfVectorizer is not serialized here)
        """
        return {'ouv_vocab': self.ouv_vocab.to_serializable()}
```
### The Dataset
```
class OuvDataset(Dataset):
    """PyTorch map-style Dataset over the OUV dataframe, with
    train/val/test views selected via ``set_split``."""

    def __init__(self, ouv_df, vectorizer):
        """
        Args:
            ouv_df (pandas.DataFrame): the dataset
            vectorizer (OuvVectorizer): vectorizer instantiated from dataset
        """
        self.ouv_df = ouv_df
        self._vectorizer = vectorizer
        # Pre-slice the three splits once, keyed on the `split` column
        # (note the 'dev' value in the data maps to the 'val' split name).
        self.train_df = self.ouv_df[self.ouv_df.split=='train']
        self.train_size = len(self.train_df)
        self.val_df = self.ouv_df[self.ouv_df.split=='dev']
        self.validation_size = len(self.val_df)
        self.test_df = self.ouv_df[self.ouv_df.split=='test']
        self.test_size = len(self.test_df)
        self._lookup_dict = {'train': (self.train_df, self.train_size),
                             'val': (self.val_df, self.validation_size),
                             'test': (self.test_df, self.test_size)}
        self.set_split('train')

    @classmethod
    def load_dataset_and_make_vectorizer(cls, ouv_csv, ngrams, cutoff):
        """Load dataset and make a new vectorizer from scratch

        Args:
            ouv_csv (str): location of the dataset
            ngrams (int): maximum n-gram order for the vectorizer
            cutoff (int): frequency cutoff for the vocabulary
        Returns:
            an instance of OuvDataset
        """
        ouv_df = pd.read_csv(ouv_csv)
        # Fit the vectorizer on the training split only.
        train_ouv_df = ouv_df[ouv_df.split=='train']
        return cls(ouv_df, OuvVectorizer.from_dataframe(train_ouv_df,ngrams=ngrams, cutoff=cutoff))

    @classmethod
    def load_dataset_and_load_vectorizer(cls, ouv_csv, vectorizer_filepath, ngrams, vectorizer):
        """Load dataset and the corresponding vectorizer.
        Used in the case where the vectorizer has been cached for re-use

        Args:
            ouv_csv (str): location of the dataset
            vectorizer_filepath (str): location of the saved vectorizer
            ngrams (int): maximum n-gram order
            vectorizer: fitted TfidfVectorizer paired with the cached
                vocabulary. NOTE(review): this parameter is immediately
                shadowed by the loaded OuvVectorizer below — confirm the
                naming is intentional.
        Returns:
            an instance of OuvDataset
        """
        ouv_df = pd.read_csv(ouv_csv)
        vectorizer = cls.load_vectorizer_only(vectorizer_filepath, ngrams=ngrams, vectorizer=vectorizer)
        return cls(ouv_df, vectorizer)

    @staticmethod
    def load_vectorizer_only(vectorizer_filepath,ngrams, vectorizer):
        """a static method for loading the vectorizer from file

        Args:
            vectorizer_filepath (str): the location of the serialized vectorizer
        Returns:
            an instance of OuvVectorizer
        """
        with open(vectorizer_filepath) as fp:
            return OuvVectorizer.from_serializable(json.load(fp),ngrams=ngrams, vectorizer=vectorizer)

    def save_vectorizer(self, vectorizer_filepath):
        """saves the vectorizer to disk using json

        Args:
            vectorizer_filepath (str): the location to save the vectorizer
        """
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self._vectorizer.to_serializable(), fp)

    def get_vectorizer(self):
        """ returns the vectorizer """
        return self._vectorizer

    def set_split(self, split="train"):
        """ selects the splits in the dataset using a column in the dataframe

        Args:
            split (str): one of "train", "val", or "test"
        """
        self._target_split = split
        self._target_df, self._target_size = self._lookup_dict[split]

    def __len__(self):
        return self._target_size

    def __getitem__(self, index):
        """the primary entry point method for PyTorch datasets

        Args:
            index (int): the index to the data point
        Returns:
            a dictionary holding the data point's features (x_data) and
            components for labels (y_target and y_fuzzy)
        """
        row = self._target_df.iloc[index]
        ouv_vector = \
            self._vectorizer.vectorize(row.data)
        # Labels are stored as stringified arrays like "[0. 1. ...]";
        # strip the brackets and parse the space-separated numbers.
        true_label = \
            np.fromstring(row.true[1:-1],dtype=float, sep=' ')
        # Some rows carry only 10 classes; pad the missing 11th with 0.
        if len(true_label)==10:
            true_label = np.append(true_label,0.0)
        fuzzy_label = \
            np.fromstring(row.fuzzy[1:-1],dtype=float, sep=' ')
        return {'x_data': ouv_vector,
                'y_target': true_label,
                'y_fuzzy': fuzzy_label
                }

    def get_num_batches(self, batch_size):
        """Given a batch size, return the number of batches in the dataset

        Args:
            batch_size (int)
        Returns:
            number of batches in the dataset
        """
        return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
                     drop_last=True, device="cpu"):
    """
    A generator function which wraps the PyTorch DataLoader. It will
    ensure each tensor is on the right device location.

    Args:
        dataset: any map-style dataset whose items are dicts of tensors.
        batch_size (int): number of samples per batch.
        shuffle (bool): reshuffle the data every epoch.
        drop_last (bool): drop the final, smaller batch.
        device (str or torch.device): destination device for every tensor.

    Yields:
        dict: each DataLoader batch with every tensor moved to *device*.
    """
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
                            shuffle=shuffle, drop_last=drop_last)
    for data_dict in dataloader:
        # Move each tensor in the batch; a dict comprehension replaces the
        # original manual loop, which also left its `tensor` variable unused.
        yield {name: tensor.to(device) for name, tensor in data_dict.items()}
```
## The Model: MLP Classifier
```
class MLPClassifier(nn.Module):
    """Two-layer perceptron over pre-vectorized n-gram feature vectors."""

    def __init__(self, embedding_size, hidden_dim, num_classes, dropout_p,
                 pretrained_embeddings=None, padding_idx=0):
        """
        Args:
            embedding_size (int): size of the input feature vectors
            hidden_dim (int): the size of the hidden dimension
            num_classes (int): the number of classes in classification
            dropout_p (float): dropout probability applied after fc1
            pretrained_embeddings (numpy.array): accepted for signature
                compatibility; not used by this model
            padding_idx (int): accepted for signature compatibility;
                not used by this model
        """
        super(MLPClassifier, self).__init__()
        self._dropout_p = dropout_p
        self.dropout = nn.Dropout(dropout_p)
        self.fc1 = nn.Linear(embedding_size, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x_in, apply_softmax=False):
        """Map a batch of feature vectors to class scores.

        Args:
            x_in (torch.Tensor): an input data tensor of shape
                (batch, embedding_size)
            apply_softmax (bool): normalise scores to probabilities;
                keep False when paired with cross-entropy losses.
        Returns:
            torch.Tensor of shape (batch, num_classes)
        """
        hidden = F.relu(self.dropout(self.fc1(x_in)))
        scores = self.fc2(hidden)
        return F.softmax(scores, dim=1) if apply_softmax else scores
```
## Training Routine
### Helper Functions
```
def make_train_state(args):
    """Initialise the bookkeeping dict used across the training loop.

    Args:
        args: namespace providing ``learning_rate`` and ``model_state_file``.
    Returns:
        dict: counters, early-stopping state, per-epoch metric histories
        for the train/val splits, and -1 placeholders for test metrics.
    """
    state = {
        'stop_early': False,
        'early_stopping_step': 0,
        'early_stopping_best_k_acc_val': 0,
        'learning_rate': args.learning_rate,
        'epoch_index': 0,
        'test_loss': -1,
        'test_1_acc': -1,
        'test_k_acc': -1,
        'test_k_jac': -1,
        'model_filename': args.model_state_file,
    }
    # One empty history list per tracked metric for each monitored split.
    for split in ('train', 'val'):
        for metric in ('loss', '1_acc', 'k_acc', 'k_jac'):
            state['{}_{}'.format(split, metric)] = []
    return state
def update_train_state(args, model, train_state):
    """Handle the training state updates.

    Components:
     - Early Stopping: Prevent overfitting.
     - Model Checkpoint: Model is saved if the model is better

    :param args: main arguments (uses ``early_stopping_criteria``)
    :param model: model to train
    :param train_state: a dictionary representing the training state values
    :returns:
        a new train_state
    """
    # Save one model at least
    if train_state['epoch_index'] == 0:
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False
    # Save model if performance improved
    elif train_state['epoch_index'] >= 1:
        # Only the newest validation accuracy matters. (The original also
        # unpacked the previous entry into an unused variable, which
        # crashed whenever the history held a single element.)
        acc_t = train_state['val_k_acc'][-1]
        if acc_t > train_state['early_stopping_best_k_acc_val']:
            # New best: checkpoint and reset the patience counter.
            train_state['early_stopping_best_k_acc_val'] = acc_t
            torch.save(model.state_dict(), train_state['model_filename'])
            train_state['early_stopping_step'] = 0
        else:
            # No improvement: burn one unit of patience.
            train_state['early_stopping_step'] += 1
        # Stop early?
        train_state['stop_early'] = \
            train_state['early_stopping_step'] >= args.early_stopping_criteria
    return train_state
```
### Evaluation Metrics
```
def compute_cross_entropy(y_pred, y_target):
    """Binary cross-entropy (with logits) between predictions and targets.

    Args:
        y_pred (torch.Tensor): raw model scores (logits).
        y_target (torch.Tensor): target distribution, same shape as y_pred.
    Returns:
        torch.Tensor: scalar mean loss.
    """
    y_target = y_target.cpu().float()
    y_pred = y_pred.cpu().float()
    criterion = nn.BCEWithLogitsLoss()
    # BCEWithLogitsLoss is called as criterion(input, target); the
    # original passed (y_target, y_pred), i.e. treated the labels as
    # logits and the logits as labels.
    return criterion(y_pred, y_target)
def compute_1_accuracy(y_pred, y_target):
    """Top-1 accuracy (%): fraction of rows whose argmax prediction
    matches the argmax target."""
    predicted = y_pred.max(dim=1)[1]
    expected = y_target.max(dim=1)[1]
    n_hits = torch.eq(predicted, expected).sum().item()
    return n_hits / len(predicted) * 100
def compute_k_accuracy(y_pred, y_target, k=3):
    """Top-k accuracy (%): counts rows whose argmax target appears among
    the k highest-scoring predictions.

    Note: the original expression `y_pred_indices[i] in y_target_indices[i]`
    relied on Tensor.__contains__ broadcasting; the membership test below
    computes the same per-row result more readably.
    """
    topk = y_pred.topk(k, dim=1)[1]
    expected = y_target.max(dim=1)[1]
    n_hits = sum(bool(expected[i] in topk[i]) for i in range(len(y_pred)))
    return n_hits / len(topk) * 100
def compute_k_jaccard_index(y_pred, y_target, k=3):
    """Mean Jaccard index between each row's top-k target indices and its
    single argmax prediction index."""
    target_topk = y_target.topk(k, dim=1)[1]
    pred_best = y_pred.max(dim=1)[1]
    ratios = [len(np.intersect1d(target_topk[i], pred_best[i]))
              / len(np.union1d(target_topk[i], pred_best[i]))
              for i in range(len(y_pred))]
    # Accumulate through a float32 tensor, matching the original numerics.
    return torch.tensor(ratios).sum().item() / len(pred_best)
def compute_jaccard_index(y_pred, y_target, k=3, multilabel=False):
    """Mean Jaccard index between thresholded predictions and targets.

    Targets are binarised at 1/(k+1); predictions at 0.5 when *multilabel*
    is set, otherwise at the same 1/(k+1) threshold.
    """
    target_threshold = 1.0 / (k + 1)
    pred_threshold = 0.5 if multilabel else target_threshold
    pred_mask = y_pred.gt(pred_threshold)
    target_mask = y_target.gt(target_threshold)
    intersection = (target_mask * pred_mask).sum(axis=1)
    # Boolean + acts as logical OR; the epsilon guards empty unions.
    union = (target_mask + pred_mask).sum(axis=1) + 1e-8
    jaccard = (intersection / union).sum().item()
    return jaccard / len(pred_mask)
def softmax_sensitive(T):
    """Shifted softmax-like normalisation: exp(T) - 1 + eps, scaled so each
    row (or the whole vector, if 1-D) sums to 1. Zero entries stay near
    zero instead of receiving the usual softmax mass."""
    shifted = np.exp(T) - 1.0 + 1e-9
    if len(shifted.shape) == 1:
        return shifted / shifted.sum()
    return shifted / (shifted.sum(axis=1).unsqueeze(1))
def cross_entropy(pred, soft_targets):
    """Mean cross-entropy between logits *pred* and soft target rows."""
    log_probs = nn.LogSoftmax(dim=1)(pred)
    per_sample = torch.sum(-soft_targets * log_probs, 1)
    return torch.mean(per_sample)
def df_to_tensor(df, device=None):
    """Convert a numeric DataFrame to a float tensor for use in PyTorch.

    Args:
        df (pandas.DataFrame): frame of numeric values.
        device: target device; defaults to the global ``args.device`` to
            stay backward compatible with existing call sites.
    Returns:
        torch.FloatTensor of shape ``df.shape`` on *device*.
    """
    if device is None:
        device = args.device  # original behaviour: implicit global
    return torch.from_numpy(df.values).float().to(device)
def get_prior():
    """Load the class-prior matrix used by ``compute_fuzzy_label(how='prior')``.

    Reads ``args.prior_csv`` (semicolon-separated, first row skipped) with
    the module-level ``classes[:-1]`` list as column names, adds an
    'Others' column, transposes, adds 'Others' again so the label appears
    on both axes, then converts via ``df_to_tensor`` (which places the
    tensor on ``args.device``).

    Returns:
        torch.Tensor: the prior matrix.
    """
    prior = pd.read_csv(args.prior_csv,sep=';',names=classes[:-1], skiprows=1)
    # Flat prior of 1 for the catch-all 'Others' class...
    prior['Others'] = 1
    prior = prior.T
    # ...added again after the transpose so both axes carry the column.
    prior['Others'] = 1
    prior = df_to_tensor(prior)
    return prior
def compute_fuzzy_label(y_target, y_fuzzy, fuzzy=False, how='uni', lbd=0):
    '''
    Using two sets of prediction labels and fuzziness parameters to compute the fuzzy label in the form as
    a distribution over classes

    Args:
        y_target (torch.Tensor) of shape (n_batch, n_classes): the true label of the ouv description
        y_fuzzy (torch.Tensor) of shape (n_batch, n_classes): the fuzzy label of the ouv description
        fuzzy (bool): whether or not to turn on the fuzziness option
        how (string): the way fuzziness weights are used, one of {'uni', 'prior', 'origin'}
        lbd (float): the scaler applied to the fuzziness of the label
    Returns:
        A pytorch Tensor of shape (n_batch, n_classes): The processed label in the form of distribution that add to 1
    '''
    assert y_target.shape == y_fuzzy.shape, 'target labels must have the same size'
    assert how in {'uni', 'prior', 'origin'}, '''how must be one of the two options in {'uni', 'prior', 'origin'}'''
    if not fuzzy:
        # no smoothing: just renormalize the hard labels
        return softmax_sensitive(y_target)
    if how == 'uni':
        # uniform smoothing: add the fuzzy labels scaled by lbd
        y_label = y_target + lbd * y_fuzzy
        return softmax_sensitive(y_label)
    ### TO DO ###
    elif how == 'prior':
        # prior-weighted smoothing: weight the fuzzy labels by how often each
        # class co-appears with the true class (see get_prior)
        prior = get_prior()
        y_inter = torch.matmul(y_target.float(),prior)
        # normalize each row by its max so the weights stay in [0, 1]
        y_inter = y_inter/(y_inter.max(dim=1, keepdim=True)[0])
        y_label = y_target + lbd * y_fuzzy * y_inter
        return softmax_sensitive(y_label)
    else:
        # 'origin': constant smoothing mass added to every class
        y_label = y_target + lbd
        return softmax_sensitive(y_label)
def sparse_to_tensor(M):
    """
    Convert a SciPy sparse matrix to a dense float32 torch tensor on args.device.

    Args:
        M: any SciPy sparse matrix (converted to COO internally)
    Returns:
        torch.Tensor: dense tensor with the same shape and values as M
    """
    M = M.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack((M.row, M.col))).long()
    values = torch.from_numpy(M.data)
    shape = torch.Size(M.shape)
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the supported API
    Ms = torch.sparse_coo_tensor(indices, values, shape, device=args.device)
    return Ms.to_dense()
```
### General Utilities
```
def set_seed_everywhere(seed, cuda):
    """Seed every RNG in use (python's random, numpy, torch, and — when the
    cuda flag is set — all CUDA devices) for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
    """Create dirpath (including intermediate directories) if it does not exist.

    Uses exist_ok=True so the check-then-create race between an exists() test
    and makedirs() cannot raise FileExistsError; the call is idempotent.
    """
    os.makedirs(dirpath, exist_ok=True)
```
### Settings and Some Prep Work
```
# Hyper-parameter and runtime configuration for the n-gram MLP experiment.
args = Namespace(
    # Data and Path information
    frequency_cutoff=1,  # minimum token frequency kept by the vectorizer
    model_state_file='model.pth',
    ouv_csv='Data/ouv_with_splits_full.csv',
    #ouv_csv='Data/all_with_splits_full.csv',
    prior_csv = 'Data/Coappearance_matrix.csv',  # co-appearance counts used by the 'prior' fuzzy mode
    save_dir='model_storage/ngram/',
    vectorizer_file='vectorizer.json',
    # Model hyper parameters
    ngrams=2,
    hidden_dim=200,
    # Training hyper parameters
    batch_size=128,
    early_stopping_criteria=5,
    learning_rate=0.0002,
    l2 = 1e-5,  # weight-decay strength
    dropout_p=0.5,
    k = 3,  # top-k used by the accuracy / jaccard metrics
    fuzzy = True,  # enable label smoothing with the fuzzy labels
    fuzzy_how = 'uni',
    fuzzy_lambda = 0.1,
    num_epochs=100,
    seed=1337,
    # Runtime options
    catch_keyboard_interrupt=True,
    cuda=True,
    expand_filepaths_to_save_dir=True,
    reload_from_files=False,
)
# Class names: the ten criteria plus a catch-all 'Others' class
# (presumably the UNESCO OUV selection criteria — confirm against the dataset).
classes = ['Criteria i', 'Criteria ii', 'Criteria iii', 'Criteria iv', 'Criteria v', 'Criteria vi',
           'Criteria vii', 'Criteria viii', 'Criteria ix', 'Criteria x', 'Others']
if args.expand_filepaths_to_save_dir:
    # prefix the vectorizer / model filenames with the save directory
    args.vectorizer_file = os.path.join(args.save_dir,
                                        args.vectorizer_file)
    args.model_state_file = os.path.join(args.save_dir,
                                         args.model_state_file)
    print("Expanded filepaths: ")
    print("\t{}".format(args.vectorizer_file))
    print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
    args.cuda = False
print("Using CUDA: {}".format(args.cuda))
args.device = torch.device("cuda" if args.cuda else "cpu")
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
```
## Initialization
```
# Build (or reload) the dataset/vectorizer pair and instantiate the classifier.
set_seed_everywhere(args.seed, args.cuda)
if args.reload_from_files:
    # training from a checkpoint
    dataset = OuvDataset.load_dataset_and_load_vectorizer(args.ouv_csv, args.vectorizer_file)
else:
    # create dataset and vectorizer
    dataset = OuvDataset.load_dataset_and_make_vectorizer(args.ouv_csv,
                                                          cutoff=args.frequency_cutoff, ngrams=args.ngrams)
    dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
# the vocabulary size doubles as the MLP input dimension
embedding_size = len(vectorizer.vectorizer.vocabulary_)
classifier = MLPClassifier(embedding_size=embedding_size,
                           hidden_dim=args.hidden_dim,
                           num_classes=len(classes),
                           dropout_p=args.dropout_p)
# display the vocabulary size (notebook cell output)
embedding_size
```
### Training Loop
```
# Profile a single forward pass; assumes `X` (a batch) and `profiler`
# (torch.autograd.profiler) are already in scope from earlier cells.
with profiler.profile(record_shapes=True) as prof:
    with profiler.record_function("model_inference"):
        classifier(X)
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
```
## Loading Trained Models
### Option 1 LS Model
```
# Restore the label-smoothing run ('uni', lambda=0.1) and its training history.
with open(args.save_dir+'hyperdict_fuzzy.p', 'rb') as fp:
    hyperdict_fuzzy = pickle.load(fp)
train_state = hyperdict_fuzzy[('uni',0.1)]
# load weights on CPU so this notebook also works without a GPU
classifier.load_state_dict(torch.load(args.save_dir+'1337/model.pth',map_location=torch.device('cpu')))
classifier.eval()
```
### Option 2 Baseline w/o LS
```
# Alternative: restore the baseline run (no label smoothing, lambda=0).
with open(args.save_dir+'hyperdict_fuzzy.p', 'rb') as fp:
    hyperdict_fuzzy = pickle.load(fp)
train_state = hyperdict_fuzzy[('uni',0)]
classifier.load_state_dict(torch.load(args.save_dir+'baseline/model.pth',map_location=torch.device('cpu')))
classifier.eval()
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in a model."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
count_parameters(classifier)
# compute the loss & accuracy on the test set using the best available model
loss_func = cross_entropy
set_seed_everywhere(args.seed, args.cuda)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
                                   batch_size=args.batch_size,
                                   device=args.device)
# running means accumulated incrementally over batches
running_loss = 0.
running_1_acc = 0.
running_k_acc = 0.
running_k_jac = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
    # get the data compute fuzzy labels
    X = batch_dict['x_data']
    y_target = batch_dict['y_target']
    y_fuzzy = batch_dict['y_fuzzy']
    Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy,
                            how=args.fuzzy_how, lbd = args.fuzzy_lambda)
    # compute the output
    with torch.no_grad():
        y_pred = classifier(X)
    # compute the loss
    loss = loss_func(y_pred, Y)
    loss_t = loss.item()
    # incremental mean: new = old + (x - old) / n
    running_loss += (loss_t - running_loss) / (batch_index + 1)
    # compute the accuracy
    acc_1_t = compute_1_accuracy(y_pred, y_target)
    acc_k_t = compute_k_accuracy(y_pred, y_target, args.k)
    jac_k_t = compute_jaccard_index(y_pred, y_target, args.k)
    running_1_acc += (acc_1_t - running_1_acc) / (batch_index + 1)
    running_k_acc += (acc_k_t - running_k_acc) / (batch_index + 1)
    running_k_jac += (jac_k_t - running_k_jac) / (batch_index + 1)
# record the aggregated test metrics on the loaded run's train_state
train_state['test_loss'] = running_loss
train_state['test_1_acc'] = running_1_acc
train_state['test_k_acc'] = running_k_acc
train_state['test_k_jac'] = running_k_jac
# Result of LS Model
train_state
# Result of Baseline
train_state
```
## Inference
```
def preprocess_text(text):
    """Lower-case the text, pad sentence punctuation with spaces, and collapse
    every run of other non-letter characters into a single space."""
    lowered = text.lower()
    padded = re.sub(r"([.,!?])", r" \1 ", lowered)
    return re.sub(r"[^a-zA-Z.,!?]+", " ", padded)
def predict_rating(text, classifier, vectorizer, classes, k=1):
    """Predict the class of an ouv description.

    Args:
        text (str): the text of the description
        classifier: the trained model; must accept `apply_softmax=True`
        vectorizer: the corresponding vectorizer (exposes `vectorize`)
        classes (list of str): The name of the ouv classes
        k (int): show the largest k prediction, default to 1

    Returns:
        (class_name, probability) when k == 1, otherwise a list of
        (class_name, probability) tuples in decreasing probability order.
        Note: for k == 1 the probability is a 0-dim tensor, for k > 1 a float
        (kept as-is for backward compatibility with existing callers).
    """
    classifier.eval()
    ouv = preprocess_text(text)
    vectorized_ouv = vectorizer.vectorize(ouv)
    # unsqueeze(0) adds the batch dimension of 1 for the forward pass
    # (removed a dead `X = vectorized_ouv.view(1,-1)` that was never used)
    with torch.no_grad():
        result = classifier(vectorized_ouv.unsqueeze(0), apply_softmax=True)
    if k==1:
        pred_id = result.argmax().item()
        return (classes[pred_id], result[0][pred_id])
    else:
        pred_indices = [i.item() for i in result.topk(k)[1][0]]
        output = []
        for pred_id in pred_indices:
            output.append((classes[pred_id],result[0][pred_id].item()))
        return output
# Qualitative spot checks: run the classifier on hand-written sentences and
# print the top prediction (or top-k) with its probability.
test_ouv = 'this is a very old building dating back to 13th century'
prediction = predict_rating(test_ouv,classifier,vectorizer,classes)
print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
# same sentence, now showing the three most probable classes
test_ouv = 'this is a very old building dating back to 13th century'
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
test_ouv = 'The particular layout of the complex is unique to this site'
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
# a longer, multi-clause description...
test_ouv = '''the lagoon of venice also has one of the highest concentrations of masterpieces in the world from
torcellos cathedral to the church of santa maria della salute . the years of the republics extraordinary golden
age are represented by monuments of incomparable beauty'''
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
# ...and each of its clauses individually, to see how predictions shift
test_ouv = '''the lagoon of venice also has one of the highest concentrations of masterpieces in the world'''
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
test_ouv = '''from torcellos cathedral to the church of santa maria della salute'''
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
test_ouv = '''the years of the republics extraordinary golden age are represented by monuments of incomparable beauty'''
k=3
predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
    print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))
import time
class Timer:
    """Context manager that prints elapsed wall-clock time when the block exits.

    If a name was supplied it is printed in square brackets before the timing.
    """

    def __init__(self, name=None):
        self.name = name

    def __enter__(self):
        self.tstart = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        if self.name:
            print(f'[{self.name}]')
        print(f'Elapsed: {time.time() - self.tstart}')
set_seed_everywhere(args.seed, args.cuda)
test_ouv = 'The particular layout of the complex is unique to this site'
k=3
# time one end-to-end prediction (vectorize + forward pass)
with Timer():
    predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k=k)
```
## Interpretability
```
def infer_tokens_importance(vocab, classifier, vectorizer, classes, k=50):
    """Rank vocabulary tokens by how strongly the classifier scores them per class.

    Every token is vectorized on its own and pushed through the classifier;
    the k highest-scoring rows are kept for each class column.

    Args:
        vocab (dict): token -> index mapping (the whole vocabulary)
        classifier (MLPClassifier): the trained model
        vectorizer: the corresponding vectorizer
        classes (list of str): The name of the ouv classes
        k (int): number of top tokens to keep per class, default 50

    Returns:
        (vocab_id, vocab_weight): tensors of shape (k, n_classes) with the row
        indices and softmax scores of the top-k tokens for each class.
    """
    classifier.eval()
    X = sparse_to_tensor(vectorizer.vectorizer.transform(list(vocab.keys())))
    with torch.no_grad():
        result = classifier(X, apply_softmax=True)
    # NOTE(review): result[1:] drops the first vocabulary row before ranking —
    # presumably skipping a special/placeholder token; confirm against the vectorizer.
    vocab_id = result[1:].topk(k, dim=0)[1]
    vocab_weight = result[1:].topk(k, dim=0)[0]
    return vocab_id, vocab_weight
# token -> index mapping learned by the underlying sklearn vectorizer
vocab = vectorizer.vectorizer.vocabulary_
len(vocab)
all_k = infer_tokens_importance(vocab, classifier, vectorizer, classes, k=50)[0]
all_k.shape
# inverse mapping (index -> token) for turning ranked indices back into words
id_vocab = {vocab[token]:token for token in vocab.keys()}
def make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=10):
    """Build a DataFrame whose column for each class lists that class's
    top-k most indicative vocabulary tokens (most indicative first)."""
    vocab_id = infer_tokens_importance(vocab, classifier, vectorizer, classes, k)[0]
    df = pd.DataFrame(columns=classes)
    for col, class_name in enumerate(classes):
        token_ids = vocab_id[:, col].tolist()
        df[class_name] = pd.Series([id_vocab[tid] for tid in token_ids])
    return df
# preview the 20 most indicative tokens per class, then persist the top 50
make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=20)
make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=50).to_csv(args.save_dir+'top_words.csv')
```
## Confusion Matrix
```
# Accumulate a confusion matrix over the test split ('Others' class excluded).
dataset.set_split('test')
set_seed_everywhere(args.seed, args.cuda)
batch_generator = generate_batches(dataset,
                                   batch_size=args.batch_size,
                                   device=args.device)
conf_mat_test = np.zeros((len(classes)-1,len(classes)-1))
for batch_index, batch_dict in enumerate(batch_generator):
    # get the data compute fuzzy labels
    X = batch_dict['x_data']
    y_target = batch_dict['y_target']
    y_fuzzy = batch_dict['y_fuzzy']
    # NOTE(review): Y is computed but unused in this cell — kept from the eval loop
    Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy,
                            how=args.fuzzy_how, lbd = args.fuzzy_lambda)
    # compute the output
    y_pred = classifier(X)
    conf_mat_test = np.add(conf_mat_test,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),
                                                          labels=range(len(classes)-1)))
conf_mat_test
# Same accumulation over the validation split.
dataset.set_split('val')
set_seed_everywhere(args.seed, args.cuda)
batch_generator = generate_batches(dataset,
                                   batch_size=args.batch_size,
                                   device=args.device)
conf_mat_val = np.zeros((len(classes)-1,len(classes)-1))
for batch_index, batch_dict in enumerate(batch_generator):
    # get the data compute fuzzy labels
    X = batch_dict['x_data']
    y_target = batch_dict['y_target']
    y_fuzzy = batch_dict['y_fuzzy']
    # NOTE(review): Y is computed but unused in this cell — kept from the eval loop
    Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy,
                            how=args.fuzzy_how, lbd = args.fuzzy_lambda)
    # compute the output
    y_pred = classifier(X)
    conf_mat_val = np.add(conf_mat_val,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),labels=range(len(classes)-1)))
conf_mat_val
# Same accumulation over the training split, then export all three matrices
# side by side (one call per run: LS model vs baseline filenames).
dataset.set_split('train')
set_seed_everywhere(args.seed, args.cuda)
batch_generator = generate_batches(dataset,
                                   batch_size=args.batch_size,
                                   device=args.device)
conf_mat_train = np.zeros((len(classes)-1,len(classes)-1))
for batch_index, batch_dict in enumerate(batch_generator):
    # get the data compute fuzzy labels
    X = batch_dict['x_data']
    y_target = batch_dict['y_target']
    y_fuzzy = batch_dict['y_fuzzy']
    # NOTE(review): Y is computed but unused in this cell — kept from the eval loop
    Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy,
                            how=args.fuzzy_how, lbd = args.fuzzy_lambda)
    # compute the output
    y_pred = classifier(X)
    conf_mat_train = np.add(conf_mat_train,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),labels=range(len(classes)-1)))
conf_mat_train
pd.concat([pd.DataFrame(conf_mat_test),pd.DataFrame(conf_mat_val),pd.DataFrame(conf_mat_train)],axis=1).to_csv(args.save_dir+'confusion_matrix.csv')
pd.concat([pd.DataFrame(conf_mat_test),pd.DataFrame(conf_mat_val),pd.DataFrame(conf_mat_train)],axis=1).to_csv(args.save_dir+'baseline_confusion_matrix.csv')
def per_class_metrics(confusion_matrix, classes):
    '''
    Compute the per class precision, recall, and F1 for all the classes

    Args:
        confusion_matrix (np.ndarray) with shape of (n_classes,n_classes): a confusion
            matrix following the sklearn convention: rows are true labels,
            columns are predicted labels
        classes (list of str) with shape (n_classes,): The names of classes

    Returns:
        metrics_dict (dictionary): a dictionary that records the per class metrics;
        metrics are 0.0 when their denominator is zero (no samples / no predictions)
    '''
    num_class = confusion_matrix.shape[0]
    metrics_dict = {}
    for i in range(num_class):
        key = classes[i]
        temp_dict = {}
        row = confusion_matrix[i,:]   # samples whose TRUE class is i
        col = confusion_matrix[:,i]   # samples PREDICTED as class i
        val = confusion_matrix[i,i]
        # BUG FIX: precision divides by the predicted count (column) and recall
        # by the true count (row); the previous version had the two swapped.
        precision = val/col.sum() if col.sum() > 0 else 0.0
        recall = val/row.sum() if row.sum() > 0 else 0.0
        # guard against 0/0 when both precision and recall are zero
        F1 = 2*(precision*recall)/(precision+recall) if (precision+recall) > 0 else 0.0
        temp_dict['precision'] = precision
        temp_dict['recall'] = recall
        temp_dict['F1'] = F1
        metrics_dict[key] = temp_dict
    return metrics_dict
# Per-class metrics for every split, flattened to a (split, class) indexed frame.
metrics_dict = {}
metrics_dict['test'] = per_class_metrics(conf_mat_test, classes[:-1])
metrics_dict['val'] = per_class_metrics(conf_mat_val, classes[:-1])
metrics_dict['train'] = per_class_metrics(conf_mat_train, classes[:-1])
metrics_df = pd.DataFrame.from_dict({(i,j): metrics_dict[i][j]
                                     for i in metrics_dict.keys()
                                     for j in metrics_dict[i].keys()},
                                    orient='index')
# one export per run: LS-model filename vs baseline filename
metrics_df.to_csv(args.save_dir+'per_class_metrics.csv')
metrics_df.to_csv(args.save_dir+'baseline_per_class_metrics.csv')
```
## Try on totally Unseen Data
```
#ouv_csv='Data/ouv_with_splits_full.csv',
# NOTE(review): a dataset the model never trained on; the 'sd' naming is
# unexplained in this notebook — confirm the source of Data/sd_full.csv
new_ouv_csv='Data/sd_full.csv'
def compute_jac_k_accuracy(y_pred, y_target, k=3, multilabel=False):
    """Top-k overlap accuracy (%): a row counts as correct when any of its k
    highest-scoring predictions appears among the target's k highest classes.
    (`multilabel` is accepted for signature compatibility but unused.)"""
    pred_topk = y_pred.topk(k, dim=1)[1]       # (n_batch, k)
    target_topk = y_target.topk(k, dim=1)[1]   # (n_batch, k)
    # broadcast to (n_batch, k, k) and look for any index match per row
    matches = pred_topk.unsqueeze(2) == target_topk.unsqueeze(1)
    n_correct = matches.any(dim=2).any(dim=1).sum().item()
    return n_correct / len(pred_topk) * 100
def compute_jac_1_accuracy(y_pred, y_target, k=3, multilabel=False):
    """Top-1-in-top-k accuracy (%): a row counts as correct when its single
    highest-scoring prediction appears among the target's k highest classes.

    Args:
        y_pred (torch.Tensor) of shape (n_batch, n_classes): prediction scores
        y_target (torch.Tensor) of shape (n_batch, n_classes): target scores
        k (int): size of the target top-k set, default 3
        multilabel (bool): unused; kept for interface compatibility

    Returns:
        float: percentage of rows whose top-1 prediction is in the target top-k
    """
    y_pred_indices = y_pred.topk(1, dim=1)[1]      # (n_batch, 1)
    y_target_indices = y_target.topk(k, dim=1)[1]  # (n_batch, k)
    # Broadcast-compare the single prediction against all k target indices.
    # (The previous version re-ran the identical membership test k times per row.)
    n_correct = torch.eq(y_target_indices, y_pred_indices).any(dim=1).sum().item()
    return n_correct / len(y_pred_indices) * 100
# Evaluate the trained classifier on a completely unseen dataset, reusing the
# fitted vectorizer; the whole pass is wall-clock timed.
with Timer():
    loss_func = cross_entropy
    set_seed_everywhere(args.seed, args.cuda)
    train_state = make_train_state(args)
    dataset = OuvDataset.load_dataset_and_load_vectorizer(new_ouv_csv, args.vectorizer_file,
                                                          ngrams=args.ngrams, vectorizer=vectorizer.vectorizer)
    dataset.set_split('val')
    verbose=False
    try:
        # Iterate over training dataset
        # setup: batch generator, set loss and acc to 0, set train mode on
        dataset.set_split('val')
        batch_generator = generate_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
        running_loss = 0.0
        running_1_acc = 0.0
        running_k_acc = 0.0
        running_k_jac = 0.0
        classifier.eval()
        for batch_index, batch_dict in enumerate(batch_generator):
            # step 2. get the data compute fuzzy labels
            X = batch_dict['x_data']
            y_target = batch_dict['y_target']
            y_fuzzy = batch_dict['y_fuzzy']
            Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy,
                                    how=args.fuzzy_how, lbd = args.fuzzy_lambda)
            # step 3. compute the output
            with torch.no_grad():
                y_pred = classifier(X)
            # step 4. compute the loss
            loss = loss_func(y_pred, Y)
            loss_t = loss.item()
            # incremental mean over batches
            running_loss += (loss_t - running_loss) / (batch_index + 1)
            # -----------------------------------------
            # compute the accuracies
            acc_1_t = compute_jac_1_accuracy(y_pred, y_target)
            acc_k_t = compute_jac_k_accuracy(y_pred, y_target, args.k)
            jac_k_t = compute_jaccard_index(y_pred, y_target, len(classes))
            running_1_acc += (acc_1_t - running_1_acc) / (batch_index + 1)
            running_k_acc += (acc_k_t - running_k_acc) / (batch_index + 1)
            running_k_jac += (jac_k_t - running_k_jac) / (batch_index + 1)
            # update bar
            if verbose:
                # NOTE(review): val_bar and epoch_index are not defined in this
                # cell; this branch only works while verbose stays False —
                # confirm against the original training notebook.
                val_bar.set_postfix(loss=running_loss,
                                    acc_1=running_1_acc,
                                    acc_k=running_k_acc,
                                    jac_k=running_k_jac,
                                    epoch=epoch_index)
                val_bar.update()
        train_state['val_loss'].append(running_loss)
        train_state['val_1_acc'].append(running_1_acc)
        train_state['val_k_acc'].append(running_k_acc)
        train_state['val_k_jac'].append(running_k_jac)
    except KeyboardInterrupt:
        print("Exiting loop")
        pass
# LS Model
train_state
# Baseline
train_state
```
## END
| github_jupyter |
# A/B and A/A tests and the power to detect a difference on a binary task (e.g. churn or propensity to buy)
A/B tests are used to detect a difference in two populations. Here we look at churn on 2 cohorts who have a low churn rate (5%), we'd like to determine how many people we need to sample to reliably detect an improvement in churn by 5% (i.e. 5%->4.75% churn improvement). The necessary sample size is probably larger than we'd desire!
The two papers below discuss ways to reduce variance and thereby increase the statistic power of a test (here we're using a ChiSq test on counts of a simulated binary outcome).
Detecting small changes requires a huge population. Detecting large changes (which are more likely to occur if you break something that was good, than if you improve something that was already good) is easier.
If you don't control for statistical power you're likely to have test groups that are too small, so you rarely know that you've seen a real change, leading to rejections of small but useful improvements.
_Sidenote_ - fixing churn is hard anyhow as folk who will churn will do so for lots of reasons (e.g. dissatisfaction, price, low use of product) so it is more likely that this is a product design test than a machine-learning led intervention.
Two good papers:
* "Trustworthy Online Controlled Experiments: Five Puzzling Outcomes Explained, 2012 (KDD), Kohavi, Deng, Frasca, Longbotham, Walker, Xu" (https://dl.acm.org/doi/10.1145/2339530.2339653)
* "Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield" (https://courses.cs.washington.edu/courses/cse454/15au/papers/p959-kohavi.pdf)
```
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
import tqdm
import sys
from scipy.stats import chi2_contingency
def set_common_mpl_styles(
    ax, legend=True, grid_axis="y", ylabel=None, xlabel=None, title=None, ymin=None,
    xmin=None
):
    """Apply the notebook's shared matplotlib styling to an Axes.

    Enables a grid on `grid_axis`, hides the top/right spines, removes the
    legend when `legend == False`, and sets whichever label/title/axis-minimum
    arguments were provided.
    """
    ax.grid(axis=grid_axis)
    if legend == False:
        ax.legend_.remove()
    for spine_name in ("top", "right"):
        ax.spines[spine_name].set_visible(False)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if title is not None:
        ax.set_title(title)
    if ymin is not None:
        ax.set_ylim(ymin=ymin)
    if xmin is not None:
        ax.set_xlim(xmin=xmin)
# Check on usage of chi2, given a contingency table we get a pvalue and expectation
# If the contingency table significantly diverges from the expected values then we
# get a small pvalue, if the tables are roughly the same then the pvalue is close to 1.0
obs = np.array([[50, 50], [28, 71]])
import pprint
pprint.pprint(obs)
chi2, pvalue, dof, expected = chi2_contingency(obs)
# a pvalue < 0.05 means we can reject the NH (NH: distributions are the same)
# and accept the AH of different distributions, with an expected error rate
# that we incorrectly reject the NH 5% of the time when there's no actual difference
print(f'pvalue for this result {pvalue:0.2f}')
print('Expected frequencies:')
print(expected)
```
```
Each experiment is repeated 10k times to get the general behaviour.
The NH is that the distributions are the same (no difference between the A and B groups). On an A A test we would hope not to see any rejections (but they'll occur due to random variation).
The AH is that the groups come from different distributions.
First we'll perform an A A test on two groups of 5k people each (10k in total). 5% of group A and 5% of group B churn (i.e. they're the same - no intervention). On 4% of runs the NH is rejected; 96% of the time the NH is accepted. Even in this no-difference case we incorrectly see "a difference" on 4% of the experiments.
Whilst 4% or so might be rejected, we'd expect a 5% rejection if we did _lots_ more experiments (we're testing at the 5% level so we expect 5% false NH rejections).
Next we run an A B test on the same groups (10k people in total). 5% of group A churn, 4.75% of group B churn i.e. group B is 5% less likely to churn - we'd like to detect this difference by rejecting the NH. This time we reject the NH 8% of the time (i.e. we detect this small difference but not much more frequently than for the A A test), so 92% of the time we believe there's no difference between these groups (which is wrong). Subtle differences on small groups are not reliably detectable.
Next we run an A B test on the same groups, this time group B is 20% less likely to churn (group A churns at 5%, group B churns at 4%). With this larger difference we reject the NH 65% of the time (so 2/3 of the time we correctly identify that the difference is significant). 1/3 of the time we still believe there's no difference.
If each marketing experiment is run only once, using a larger cohort than is currently used (i.e. the 10k proposed above), we'd still see high variance in the results.
To reliably detect a relatively large 20% change in churn we'd need a population of 20k people under study (two groups of 10k each), this lets us identify the difference on 90% of the runs (i.e. we'd still see the wrong answer 10% of the time). Thus the power of this configuration is 90% (which is a commonly accepted level).
To detect a smaller improvement (which is more likely to be the case) we start to need significantly larger test groups.
To reliably detect a smaller 5% change in churn (assuming churn is 5% at the start) we'd need a population of 320,000 people in total, to get the correct answer 90% of the time. The research paper noted below has an estimator formula, it suggests we'd need circa 480k people to achieve this accuracy. This formula seems to overestimate but roughly gives the same answer as the simulation, so it is a useful and quick guide.
With 2,000,000 people in total (1M per group), in an A/A test, we reject the NH 5% of the time. With an A/B test with a very tiny variation (10^-7) we detect this change barely above the 5% level i.e. we can't detect very tiny changes even with a huge sample (we'd need an even-huger sample for this).
```
```
# Simulation parameters: two cohorts, each experiment repeated REPEATS times.
REPEATS = 10_000
PEOPLE_A = 10_000
PEOPLE_B = 10_000
CUTOFF_A = 0.05 # 5% churn
#B_MODIFIER = 1.0 # 1.0 means no change
#B_MODIFIER = 0.95 # 0.95 means go down by 5% (i.e. improve churn by 5%)
B_MODIFIER = 0.8 # 0.8 means go down by 20% (i.e. improve churn by 20%)
CUTOFF_B = CUTOFF_A * B_MODIFIER
print(f'{PEOPLE_A:,} in A, {PEOPLE_B:,} in B')
print(f'P(churn|A) == {CUTOFF_A*100:0.2f}%, P(churn|B) == {CUTOFF_B*100:0.2f}%')
```
### Estimate using "Practical Guide to Controlled Experiments on the Web" (paper) formula
Using "Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield" (https://courses.cs.washington.edu/courses/cse454/15au/papers/p959-kohavi.pdf) we can estimate how many participants we need to track if we're going to do a >90% power 2 category ChiSq test.
**NOTE** that this estimation method _overestimates_ the number of samples required, but is correct (given my experiments) within a factor of 2.
```
# "Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield"
NBR_VARIANTS = 2
DESIRED_LEVEL_OF_CHANGE_TO_DETECT = max(1-B_MODIFIER, 0.0000000001) # avoid 0 else this estimator fails
STD_DEV_OEC = np.sqrt(CUTOFF_A * (1-CUTOFF_A)) # std dev of Bernoulli trial on an X% event e.g. 5% churn rate
DELTA = CUTOFF_A * DESIRED_LEVEL_OF_CHANGE_TO_DETECT
nbr_trials_we_need = int((4 * NBR_VARIANTS * STD_DEV_OEC / (DELTA)) ** 2)
print(f"According to the _estimate_ formula we'll need {nbr_trials_we_need:,} participants in total")
print('Note that this formula is quick and it over-estimates the population size')
# Re-run the estimator for a sweep of churn improvements (5% .. 25%).
NEW_B_MODIFIERS = [0.95, 0.9, 0.85, 0.8, 0.75] # reduce churn by a set of
#NEW_B_MODIFIERS = np.arange(0.99, 0.85, -0.01)
total_estimated_participants = []
results_to_plot = []
for new_b_modifier in NEW_B_MODIFIERS:
    NBR_VARIANTS = 2
    DESIRED_LEVEL_OF_CHANGE_TO_DETECT = max(1-new_b_modifier, 0.0000000001) # avoid 0 else this estimator fails
    STD_DEV_OEC = np.sqrt(CUTOFF_A * (1-CUTOFF_A)) # std dev of Bernoulli trial on an X% event e.g. 5% churn rate
    DELTA = CUTOFF_A * DESIRED_LEVEL_OF_CHANGE_TO_DETECT
    nbr_trials_we_need = int((4 * NBR_VARIANTS * STD_DEV_OEC / (DELTA)) ** 2)
    total_estimated_participants.append(nbr_trials_we_need)
    results_to_plot.append({'change_to_detect': (1-new_b_modifier)*100, 'nbr_trials': nbr_trials_we_need})
def set_human_format(ax, on_x_axis=False, on_y_axis=False, **kwargs):
    '''Add commas e.g. 1_000_000 -> "1,000,000"'''
    # note hardcoded in this case to the one plot I'm using below
    if on_x_axis == False and on_y_axis == False:
        raise ValueError("An axis must be chosen!")
    if on_x_axis:
        axis = ax.get_xaxis()
        # NOTE(review): closes over the global results_to_plot_df — the x
        # formatter is only valid for the bar chart built in the next cell
        axis.set_major_formatter(
            mpl.ticker.FuncFormatter(lambda x, p: f"{results_to_plot_df.index[x]:0.0f}%")
        )
    if on_y_axis:
        axis = ax.get_yaxis()
        axis.set_major_formatter(
            mpl.ticker.FuncFormatter(lambda x, p: f"{int(x):,}")
        )
fig, axs = plt.subplots(figsize=(6, 6), constrained_layout=True)
ax = axs
# sort descending so the largest (easiest to detect) change is plotted first
results_to_plot_df = pd.DataFrame(results_to_plot).set_index('change_to_detect').sort_index(ascending=False)
results_to_plot_df.plot(kind='bar', ax=ax, )
title = 'With larger expected changes we need significantly fewer samples\nfor small changes we need disproportionately more samples\nto reliably detect a true difference 90% of the time'
set_common_mpl_styles(ax, title=title, ylabel='Number of trials required',
                      xlabel = 'Level of change we wish to detect', ymin=0, legend=False)
#results_to_plot.style.format({'nbr_trials':"0.0f%"})
set_human_format(ax, on_y_axis=True, on_x_axis=True)
```
### Monte Carlo simulation result
```
# Monte Carlo simulation: draw the A and B cohorts REPEATS times, run a chi-sq
# test on each draw, and collect the pvalues plus the per-run cohort means.
pvalues = []
print(f'{PEOPLE_A:,} in A, {PEOPLE_B:,} in B')
print(f'P(churn|A) == {CUTOFF_A*100:0.2f}%, P(churn|B) == {CUTOFF_B*100:0.2f}%')
sys.stdout.flush() # ignore print conflict with tqdm
nbr_times_mean_a_gt_mean_b = []
mean_a = []
mean_b = []
for rep in tqdm.tqdm(range(REPEATS)):
    # real values in the continuous uniform range [0, 1)
    # then cast to ints (0 or 1) for chi sq frequency table
    arr_a = (np.random.random_sample(size=PEOPLE_A) < CUTOFF_A).astype('int')
    arr_b = (np.random.random_sample(size=PEOPLE_B) < CUTOFF_B).astype('int')
    # create a chi sq frequency table (as above)
    obs = [[arr_a.sum(), arr_a.shape[0]-arr_a.sum()], [arr_b.sum(), arr_b.shape[0]-arr_b.sum()]]
    chi2, pvalue, dof, expected = chi2_contingency(obs)
    pvalues.append(pvalue)
    mean_a.append(arr_a.mean())
    mean_b.append(arr_b.mean())
    nbr_times_mean_a_gt_mean_b.append(arr_a.mean() > arr_b.mean())
pvalues = np.array(pvalues) # turn list to array
sys.stderr.flush()
print('Calculate nbr of times we reject NH of no-difference:')
print(f'{sum(pvalues < 0.05) / REPEATS * 100:0.1f}% Null Hyp rejects (NH: No difference in distributions)')
# Calculate how often mean_a > mean_b, if we had a significant result we would then follow
# this guidance which for an A A test would mean a random choice (as mean_a==mean_b==50%)
fraction_time_mean_a_gt_mean_b = sum(nbr_times_mean_a_gt_mean_b) / len(nbr_times_mean_a_gt_mean_b)
print(f'Percentage of time mean-A is greater than mean-B: {fraction_time_mean_a_gt_mean_b*100:0.2f}%')
# Scatter the per-run cohort means; runs whose chi-sq pvalue rejects the NH at
# the 5% level get a red border, the rest blue.
df = pd.DataFrame({'mean_a': mean_a, 'mean_b': mean_b})
fig, axs = plt.subplots(figsize = (6, 6), ncols=1, sharey=True)
ax = axs
border_color = ['r' if v==True else 'b' for v in pvalues < 0.05] # r if reject NH
df.plot(kind='scatter', x='mean_a', y='mean_b', ax=ax, alpha=1, edgecolors=border_color)
title = f'Means of A and B on {len(mean_a):,} repeated experiments'
title += "\nStraight line shows expectation if there's no relationship"
title += "\nbut ignores where on the line we'd be"
# fixed: the rejection threshold used above is 0.05, not 0.5
title += "\nred border == pvalue < 0.05 else blue border"
set_common_mpl_styles(ax, ylabel='mean_b (note false 0)', xlabel='mean_a (note false 0)', title=title)
# square up the axes and draw the y == x reference line
min_val = min(ax.get_xlim()[0], ax.get_ylim()[0])
max_val = max(ax.get_xlim()[1], ax.get_ylim()[1])
ax.set_xlim(xmin=min_val, xmax=max_val)
ax.set_ylim(ymin=min_val, ymax=max_val);
ax.plot([min_val, max_val], [min_val, max_val], color='blue');
```
## Plot distribution of pvalues from this test
The left-most bin counts the number of times we've seen a pvalue < 0.05. We can visually see if the pvalues are evenly distributed (which suggests an A A test) or if they're biased one way, if biased to the left then it suggests for a large enough set of repeats that we're seeing a difference in the population.
```
fig, axs = plt.subplots(figsize = (8, 6), ncols=1, sharey=True, constrained_layout=True)
bins = np.linspace(0, 1, 21)  # 20 equal pvalue bins of width 0.05
ax = axs
ser = pd.cut(pd.Series(pvalues), bins, right=False).value_counts().sort_index()
ser.plot(kind='bar')
set_common_mpl_styles(ax, ymin=-1, ylabel='Frequency', xlabel='pvalue',
                      title=r"Distribution of $\chi^2$ pvalues for NH on boolean Churn results")
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
# Cross-tabulate the LAST simulated run's churn outcomes for A vs B.
df = pd.DataFrame({'A': arr_a, 'B': arr_b})
print('A and B groups with no-churn (0) and churn (1)')
labels = {0:'no churn', 1: 'churn'}
pd.crosstab(df['A'], df['B'], margins=True, normalize=True). \
    rename(columns=labels, index=labels).style.format('{:.2%}')
```
| github_jupyter |
# Mass Transports
Transport diagnostics for flow through major straits.
## Theory
Formally, mass transports are given by
$$T_x = \rho u $$
$$T_y = \rho v $$
Mass transports are diagnostics that are calculated online by the model:
|variable|long name|units|dimensions|
|--|--|--|--|
|tx_trans|T-cell i-mass transport|Sv|(time,st_ocean,xu_ocean,yt_ocean)|
|ty_trans|T-cell j-mass transport|Sv|(time,st_ocean,xt_ocean,yu_ocean)|
These variables are saved in `ocean.nc` files.
## Calculation
```
%matplotlib inline
import cosima_cookbook as cc
import matplotlib.pyplot as plt
```
Use default database for this calculation
```
session = cc.database.create_session()
```
This dictionary defines a few key choke points that you might be interested in. For the purposes of demonstration we are just using Drake Passage in this example.
```
# Section endpoints per strait: [xmin, xmax, ymin, ymax] in degrees, with
# longitudes shifted into the model's range. Either xmin == xmax (meridional
# section) or ymin == ymax (zonal section). "straights" is a misspelling of
# "straits", kept because calc_transport below reads this name.
straights = { 'DrakePassage': [-69.9, -69.9, -71.6, -51.0],
              'Lombok': [-244.6+360, -243.9+360, -8.6, -8.6],
              'Ombai' : [-235.0+360, -235.0+360, -9.2, -8.1],
              'Timor' : [-235.9+360, -235.9+360, -11.9, -9.9],
              'Bering' : [-172, -167, 65.8, 65.8],
              'Denmark' : [-42, -22, 65.8, 65.8],
            }
def calc_transport(expt, straight, n=24):
    """
    Calculate barotropic transport across a given
    line of latitude or longitude.
    Designed for flow through straits.

    Parameters
    ----------
    expt : str
        Experiment name understood by the COSIMA cookbook database.
    straight : str
        Key into the module-level ``straights`` dict; the value is a
        [xmin, xmax, ymin, ymax] section with either xmin == xmax
        (meridional section) or ymin == ymax (zonal section).
    n : int, optional
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    Time series of net transport through the section, in Sverdrups
    (assumes the stored transport is a mass transport in kg/s given the
    1e6 * 1036 normalisation below -- TODO confirm; the notes table above
    lists the variable's units as Sv).

    Raises
    ------
    ValueError
        If the section is neither purely meridional nor purely zonal.
    """
    print('Calculating {}:{} transport'.format(expt, straight))
    xmin, xmax, ymin, ymax = straights[straight]
    if xmin == xmax:
        # Meridional section: integrate the i-direction (zonal) transport.
        tx_trans = cc.querying.getvar(expt, 'tx_trans', session)
        transport = tx_trans.sel(xu_ocean=xmin, method='nearest')\
            .sel(yt_ocean=slice(ymin, ymax))\
            .sum('st_ocean').sum('yt_ocean')/1e6/1036
    elif ymin == ymax:
        # Zonal section: integrate the j-direction (meridional) transport.
        # BUG FIX: this branch previously fetched 'tx_trans'; a zonal
        # section needs the meridional component 'ty_trans'.
        ty_trans = cc.querying.getvar(expt, 'ty_trans', session)
        transport = ty_trans.sel(yu_ocean=ymin, method='nearest')\
            .sel(xt_ocean=slice(xmin, xmax))\
            .sum('st_ocean').sum('xt_ocean')/1e6/1036
    else:
        raise ValueError('Transports are computed only along lines of either constant latitude or longitude')
    transport = transport.compute()
    return transport
%%time
# Time series of Drake Passage transport for the 0.1-degree spinup run.
#expt = '025deg_jra55v13_ryf8485_gmredi6'
expt = '01deg_jra55v13_ryf8485_spinup6_000-413'
transport = calc_transport(expt, 'DrakePassage')
transport.plot(linestyle='-')
```
| github_jupyter |
# Introduction to Adaptive Thresholding
This tutorial will go over some basic concepts you may wish to consider when setting thresholds for production models or otherwise.
## Make Some Data
This tutorial doesn't actually require real data--nor even a model! We'll make some fake data to get the idea. Don't worry too much about this step. Let's just assume we have a series of scores. These could represent model performance, divergences, or model scores themselves. Throughout this tutorial, we'll assume that increasing values of this score will be increasingly likely to represent a good alert. Then we are left to determine an appropriate threshold to balance true/false positive/negatives. This is balancing time wasted on bad alerts with the utility gained from finding a good alert that resulted from a lower score.
```
import numpy
import pandas
numpy.random.seed(0)  # reproducible draws
pandas.options.display.max_rows=5 # restrict to 5 rows on display
# Synthetic alert scores: positives skew high (Beta(5, 1)), negatives skew
# low (Beta(2, 3)), with 5 negatives per positive.
n_positive = 600
positives = numpy.random.beta(5, 1, size=n_positive)
n_negative = 5 * n_positive
negatives = numpy.random.beta(2, 3, size=n_negative)
# Two columns: 'Ground Truth' (1 for positive, 0 for negative) and the score.
data = pandas.DataFrame(numpy.asarray((numpy.concatenate((numpy.ones(n_positive),
                                                          numpy.zeros(n_negative))),
                                       numpy.concatenate((positives,
                                                          negatives)))).T,
                        columns=['Ground Truth', 'Model Score'])
# Shuffle so positives and negatives are interleaved, then reindex.
data = data.sample(frac=1, random_state=0).reset_index(drop=True)
data
```
## Positive and Negative Distributions
We want to determine the trade off between catching more true positives and getting more false negatives. Let's see what the distribution of scores associated with good (positive) and bad (negative) alerts looks like.
```
# Rug plot of 100 sampled positive scores along the score axis.
import seaborn as sns; sns.set()
import matplotlib
import matplotlib.pylab as plt
import numpy
plt.figure(figsize=(10, 2))
plt.title('Positive Scores')
plt.xlabel('Model Score')
plt.yticks([])
positives = data[data['Ground Truth'] == 1]['Model Score'].sample(100, random_state=0).values
sns.rugplot(positives,
            height=1.0,
            color='green',
            label='Positive Samples')
plt.show()
# Same, for 100 sampled negative scores.
import seaborn as sns; sns.set()
import matplotlib
import matplotlib.pylab as plt
import numpy
plt.figure(figsize=(10, 2))
plt.title('Negative Scores')
plt.xlabel('Model Score')
plt.yticks([])
negatives = data[data['Ground Truth'] == 0]['Model Score'].sample(100, random_state=0).values
sns.rugplot(negatives,
            height=1.0,
            color='red',
            label='Negative Samples')
plt.show()
```
We can also plot an approximation of the probability distribution of positive and negative scores given our sample data.
```
# KDE approximation of the positive-score distribution.
plt.figure(figsize=(10, 10))
sns.set()
plt.title('Positive Score Distribution')
plt.xlabel('Model Score')
plt.ylabel('Probability Density')
positives = data[data['Ground Truth'] == 1]['Model Score'].values
sns.kdeplot(positives,
            color='green',
            label='Positive Score Distribution')
plt.show()
# KDE approximation of the negative-score distribution.
plt.figure(figsize=(10, 10))
sns.set()
plt.title('Negative Score Distribution')
plt.xlabel('Model Score')
plt.ylabel('Probability Density')
negatives = data[data['Ground Truth'] == 0]['Model Score'].values
sns.kdeplot(negatives,
            color='red',
            label='Negative Score Distribution')
plt.show()
```
It will also be important to keep in mind the distribution of model scores.
```
# KDE of the pooled score distribution (what would be observed in
# production, where ground truth is unknown).
plt.figure(figsize=(10, 10))
sns.set()
plt.title('Score Distribution')
plt.xlabel('Model Score')
plt.ylabel('Probability Density')
scores = data['Model Score'].values
sns.kdeplot(scores,
            color='blue',
            label='Score Distribution')
plt.show()
```
## Computing Optimal Thresholds
When scoring a model after choosing a threshold, each model score can be associated with one of four possible outcomes:
1. Positive instance is scored above the threshold.
2. Negative instance is scored above the threshold.
3. Negative instance is scored at or below the threshold.
4. Positive instance is scored at or below the threshold.
Let's say each of these outcomes has an associated probability $p$ and an associated utility $u$ as determined by the business:
1. $p_\mathrm{tp}$, $u_\mathrm{tp}$
2. $p_\mathrm{fp}$, $u_\mathrm{fp}$
3. $p_\mathrm{tn}$, $u_\mathrm{tn}$
4. $p_\mathrm{fn}$, $u_\mathrm{fn}$
Then the expected utility of a scored sample with unknown ground truth is
$u = p_\mathrm{tp}u_\mathrm{tp} +
p_\mathrm{fp}u_\mathrm{fp} +
p_\mathrm{tn}u_\mathrm{tn} +
p_\mathrm{fn}u_\mathrm{fn}$
For the purposes of this experiment, let's say the business would be 10 times as disappointed to learn of a false negative than they would be to have to pay for analysts wasting their time on a false positive.
```
def utility(tp, fp, tn, fn):
    """Business utility of a batch of confusion-matrix outcomes.

    True positives and true negatives carry no reward here; each false
    negative costs ten times as much as a false positive.
    """
    penalty = fp + 10 * fn
    return -penalty
```
## Visualizing Utility of Each Threshold
Let's plot expected utility against some candidate thresholds. The algorithm below will generate a plot with error bars over the expected value. If you need to interpret them, you can say _there's a 50% chance the true utility of each threshold falls within the shaded region_.
```
# Expected utility (with uncertainty band) for each candidate threshold.
import numpy
import matplotlib.pylab as plt
import seaborn as sns, numpy
from mvtk import thresholding
plt.figure(figsize=(10, 10))
plt.title('Expected Utility vs Threshold')
plt.xlabel('Threshold')
plt.ylabel('Expected Utility')
scores, utility_mean, utility_quantiles = thresholding.expected_utility(
    utility, data[['Ground Truth', 'Model Score']].values)
thresholding.plot_err(scores,
                      utility_mean,
                      utility_quantiles,
                      label='Expected Utility')
leg = plt.legend()
# NOTE(review): Legend.legendHandles was deprecated in Matplotlib 3.7
# (use legend_handles) -- confirm the Matplotlib version in use.
for lh in leg.legendHandles:
    lh.set_alpha(1)
plt.show()
```
## Finding the Optimal Threshold
This is the threshold that corresponds to the peak of the utility function plotted above.
You don't need to worry about the mechanics of this function, you can just copy and paste it.
```
thresholding.optimal_threshold(utility, data)
```
## Optimal Threshold Distribution
If we know our sample of positives and negatives is unbiased (e.g. the analysts were equally likely to label any instance of their data), we can generally express our uncertainty in the location of the optimal threshold (which stems from our uncertainty in the utility function) to compute a distribution over what our optimum threshold might be given the data we have so far.
You don't need to worry about the mechanics of this function, you can just copy and paste it.
```
# Distribution over plausible optimal thresholds via Thompson sampling.
sns.set(); numpy.random.seed(0)
plt.figure(figsize=(10, 10))
plt.xlim([0, 1])
plt.title('Threshold Distribution')
plt.xlabel('Threshold')
plt.ylabel('Probability Density')
sns.kdeplot(thresholding.thompson_sample(utility, data),
            color='blue',
            label='Likelihood Threshold is Optimal')
plt.show()
```
## Adaptive Thresholding
Without getting into the mechanics, the system can dynamically choose between _exploration mode_, during which it sets the threshold to `0`, and _exploitation mode_, during which it attempts to pick the optimal threshold (or, in practice, one that is likely to be optimal).
## Online Learning
Here we will give an example of how to apply adaptive thresholding to an online learning problem.
In this example, we will iterate over the data we have in chronological order (since it's fake data, let's just assume it was already ordered chronologically) and simulate a system that applies the above adaptive thresholding algorithm to the problem of identifying a new optimal threshold each time a new label arrives (e.g. from someone checking in on an alert and determining if it's a good one).
```
# Online loop: feed each (label, score) pair to the adaptive thresholder,
# recording the threshold it recommends after every new observation.
thresholder = thresholding.AdaptiveThreshold(utility)
thresholds = []
for ground_truth, score in data[['Ground Truth', 'Model Score']].values:
    thresholds.append(thresholder(ground_truth, score))
thresholds = numpy.asarray(thresholds)
```
What percent of the time did we end up setting the threshold to `0`? You'll notice we start out setting the threshold to `0` about 45% of the time to gather data, but that quickly drops to about 5% once we have a good understanding of the system.
```
# Moving-average fraction of epochs spent exploring (threshold forced to 0).
import matplotlib.pylab as plt
plt.figure(figsize=(9, 9))
plt.ylabel('Exploration Percent (Moving Average)')
plt.xlabel('Epoch')
plt.plot(thresholding.exploration_proportion(thresholds, 100))
plt.show()
```
About 10% of the alerts triggered were just to get unbiased data.
```
# Absolute count and overall fraction of epochs where the threshold was
# forced to 0 (exploration / unbiased data collection).
(thresholds == 0).sum()
(thresholds == 0).mean()
```
## Examining Results
To get a feel for what the algorithm is doing, let's reconstruct the utility function plot as before, but with the 64 most recent thresholds. As you can see, the thresholds are landing pretty close to the optimal value, while we were typically only taking unbiased data 2 to 3% of the time.
```
# Re-plot the expected-utility curve and overlay the 64 most recent
# thresholds the adaptive scheme chose.
import matplotlib.pylab as plt
import seaborn as sns, numpy
plt.figure(figsize=(10, 10))
plt.title('Expected Utility vs Threshold')
plt.xlabel('Threshold')
plt.ylabel('Expected Utility')
scores, utility_mean, utility_quantiles = thresholding.expected_utility(
    utility, data[['Ground Truth', 'Model Score']].values)
# candidate thresholds are existing model scores
thresholding.plot_err(scores,
                      utility_mean,
                      utility_quantiles,
                      label='Expected Utility')
ax = sns.rugplot(thresholds[-64:],  # most recent
                 color='green',
                 label='64 Most Recent Thresholds')
leg = plt.legend()
# NOTE(review): legendHandles is deprecated in Matplotlib >= 3.7
# (use legend_handles) -- confirm the Matplotlib version in use.
for lh in leg.legendHandles:
    lh.set_alpha(1)
plt.show()
```
We can watch the distribution of (nonzero) thresholds chosen evolve over time and approach the ideal one (computed using _all_ the data in our data set).
```
%%capture
import matplotlib.pylab as plt
import seaborn as sns, numpy
import os
import shutil
from pathlib import Path
# Start from a clean frame directory so stale frames never leak into the GIF.
frame_dir = 'frames'
if os.path.exists(frame_dir):
    shutil.rmtree(frame_dir)
Path(frame_dir).mkdir(parents=True, exist_ok=True)
def mkplot(thresholds, large_sample_ideal_thresholds):
    """Render one animation frame comparing chosen vs ideal thresholds.

    Saves the figure to frames/im_{i}.png.

    NOTE(review): the epoch counter ``i`` is read from the enclosing
    module scope (it is set by the loop below), not passed in -- calling
    this before that loop runs raises NameError.
    NOTE(review): sns.distplot is deprecated/removed in recent seaborn
    (use kdeplot/histplot) -- confirm the seaborn version in use.
    """
    sns.set(); numpy.random.seed(0)
    plt.figure(figsize=(10, 10))
    plt.xlim([0, 1])
    plt.title(f'Threshold Distribution (epoch {i})')
    plt.xlabel('Threshold')
    plt.ylabel('Probability Density')
    # Exploration epochs (threshold == 0) are excluded from the KDE.
    ax = sns.distplot(thresholds[thresholds > 0],
                      rug=True,
                      hist=False,
                      kde=True,
                      color='green',
                      label='Thresholds Chosen Using Unbiased Samples')
    ax = sns.distplot(large_sample_ideal_thresholds,  # most recent
                      rug=False,
                      hist=False,
                      kde=True,
                      color='blue',
                      label='Ideal Threshold Distribution')
    leg = plt.legend(loc='upper right')
    for lh in leg.legendHandles:
        lh.set_alpha(1)
    plt.savefig(os.path.join(frame_dir, f'im_{i}.png'))
# Generate one frame per window of dn thresholds, then assemble the GIF.
large_sample_ideal_thresholds = thresholding.thompson_sample(utility, data)
N = 64
dn = len(thresholds) // N
j = 0
for num_frames, i in enumerate(range(dn, len(thresholds) + dn, dn)):
    mkplot(thresholds[j:i], large_sample_ideal_thresholds)
    j = i
# NOTE(review): this trailing call reuses the last loop value of ``i``,
# so the final frame overwrites the previously saved im_{i}.png.
mkplot(thresholds[j:], large_sample_ideal_thresholds)
import os
import imageio
images = []
# Frame files are named im_<i>.png; sort numerically on the embedded index.
for filename in sorted(os.listdir(frame_dir), key=lambda x: int(x[3:-4])):
    images.append(imageio.imread(os.path.join(frame_dir, filename)))
imageio.mimsave('threshold_distribution_evolution.gif', images, duration=30 / (num_frames + 1))
```

| github_jupyter |
# Overlap matrices
This notebook will look at different ways of plotting overlap matrices and making them visually appealing.
One way to guarantee good colour choices for colour-blind people is to use this tool: https://davidmathlogic.com/colorblind
```
# %pylab pulls numpy + matplotlib names into the global namespace
# (deprecated style, but relied on by later cells: linspace, xlabel, ...).
%pylab inline
import pandas as pd
import seaborn as sbn
sbn.set_style("ticks")
sbn.set_context("notebook", font_scale = 1.5)
# raw_matrices_review.dat holds three stacked 9x9 overlap matrices:
# rows 0-8 "good", rows 9-17 "ugly", last 9 rows "bad".
data = np.loadtxt('raw_matrices_review.dat')
good = (data[:9][:])  # the trailing [:] is a no-op copy of the row slice
bad = data[-9:][:]
ugly = data[9:18][:]
# Your Standard plot
# NOTE(review): `figsize` here is expected from the %pylab namespace --
# confirm it exists in the environment; plt.figure(figsize=(8, 8)) is the
# conventional spelling.
fig =figsize(8,8)
ax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=sbn.light_palette((210, 90, 60), input="husl") )
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
# Changing the colour map
from matplotlib import colors
from matplotlib.colors import LogNorm
#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
cmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])
bounds=[0.0, 0.025, 0.1, 0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])
#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )
# Continuous reversed-bone colormap for the "ugly" matrix, x labels on top.
ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=0, linecolor='white', annot_kws={"size": 14},square=True,robust=True,cmap='bone_r', vmin=0, vmax=1 )
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
for _, spine in ax.spines.items():
    spine.set_visible(True)
# Hide annotations for (near-)zero cells to reduce clutter.
show_annot_array = ugly >= 0.0001
for text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):
    text.set_visible(show_annot)
# Changing the colour map
from matplotlib import colors
from matplotlib.colors import LogNorm
#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
cmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])
bounds=[0.0, 0.025, 0.1, 0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])
#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )
# Same reversed-bone rendering for the "good" matrix.
ax = sbn.heatmap(good,annot=True, fmt='.2f', linewidths=0, linecolor='black', annot_kws={"size": 14},square=True,robust=True,cmap='bone_r',vmin=0, vmax=1 )
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
for _, spine in ax.spines.items():
    spine.set_visible(True)
# Hide annotations below 0.001 (different cutoff from the "ugly" cell).
show_annot_array = good >= 0.001
for text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):
    text.set_visible(show_annot)
# Changing the colour map
from matplotlib import colors
from matplotlib.colors import LogNorm
#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
cmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])
bounds=[0.0, 0.025, 0.1, 0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])
#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )
# Same reversed-bone rendering for the "bad" matrix.
ax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=0, linecolor='black', annot_kws={"size": 14},square=True,robust=True,cmap='bone_r',vmin=0, vmax=1 )
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
for _, spine in ax.spines.items():
    spine.set_visible(True)
# Hide annotations below 0.01 (again a different cutoff).
show_annot_array = bad >= 0.01
for text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):
    text.set_visible(show_annot)
# Changing the colour map
from matplotlib import colors
#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
# Discrete 4-colour map with unequal bins placed at overlap levels of
# interest (0.025 / 0.1 / 0.3).
cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
bounds=[0.0, 0.025, 0.1, 0.3,0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[.025, .1, .3,0.8])
ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
# Same discrete colouring for the "bad" matrix.
cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
bounds=[0.0, 0.025, 0.1, 0.3,0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[.025, .1, .3,0.8])
ax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True,cmap=cmap, norm=norm, cbar_kws=cbar_kws )
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
# (labels are set twice; the second pair is redundant but harmless)
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
# Same discrete colouring for the "good" matrix.
cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])
bounds=[0.0, 0.025, 0.1, 0.3,0.8]
norm = colors.BoundaryNorm(bounds, cmap.N, clip=False)
cbar_kws=dict(ticks=[.025, .1, .3,0.8])
ax = sbn.heatmap(good,annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14},square=True,robust=True, cmap=cmap, norm=norm,vmin=0,vmax=1,cbar_kws=cbar_kws )
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
# NOTE(review): this rebinding of cbar_kws (a *string* instead of a list
# of ticks) is never used afterwards -- looks like leftover scratch.
cbar_kws={'ticks': '[0.0, 0.2, 0.4, 0.6, 0.8, 1.0]'}
# Playing with pandas and getting more exotic
# The 9x9 "bad" matrix as a DataFrame with 1-based lambda-index column names.
df = pd.DataFrame(bad, columns=["1","2","3","4","5","6","7","8","9"])
#https://towardsdatascience.com/better-heatmaps-and-correlation-matrix-plots-in-python-41445d0f2bec
def heatmap(x, y, x1, y1, **kwargs):
    """Draw a scatter-based "heatmap": one square marker per cell, with
    marker colour and size mapped from data values, plus a colour legend.

    Adapted from the towardsdatascience "better heatmaps" article above.

    NOTE(review): the scatter is drawn from the coordinate arguments
    x1/y1, while x/y are only used to derive tick names when
    x_order/y_order are absent -- confirm this asymmetry is intended.
    Keyword arguments consumed here: color, palette, color_range, size,
    size_range, size_scale, marker, x_order, y_order; everything else is
    passed through to ax.scatter.
    """
    if 'color' in kwargs:
        color = kwargs['color']
    else:
        color = [1] * len(x)

    if 'palette' in kwargs:
        palette = kwargs['palette']
        n_colors = len(palette)
    else:
        n_colors = 256  # Use 256 colors for the diverging color palette
        palette = sbn.color_palette("Blues", n_colors)

    if 'color_range' in kwargs:
        color_min, color_max = kwargs['color_range']
    else:
        color_min, color_max = min(color), max(color)  # Range of values that will be mapped to the palette, i.e. min and max possible correlation

    def value_to_color(val):
        # Linearly map val onto the palette index range, clamped to [0, 1].
        if color_min == color_max:
            return palette[-1]
        else:
            val_position = float((val - color_min)) / (color_max - color_min)  # position of value in the input range, relative to the length of the input range
            val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
            ind = int(val_position * (n_colors - 1))  # target index in the color palette
            return palette[ind]

    if 'size' in kwargs:
        size = kwargs['size']
    else:
        size = [1] * len(x)

    if 'size_range' in kwargs:
        size_min, size_max = kwargs['size_range'][0], kwargs['size_range'][1]
    else:
        size_min, size_max = min(size), max(size)

    size_scale = kwargs.get('size_scale', 500)

    def value_to_size(val):
        # Linearly map val onto a marker area in (0, size_scale].
        if size_min == size_max:
            return 1 * size_scale
        else:
            val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01  # position of value in the input range, relative to the length of the input range
            val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
            return val_position * size_scale

    if 'x_order' in kwargs:
        x_names = [t for t in kwargs['x_order']]
    else:
        x_names = [t for t in sorted(set([v for v in x]))]
    x_to_num = {p[1]: p[0] for p in enumerate(x_names)}

    if 'y_order' in kwargs:
        y_names = [t for t in kwargs['y_order']]
    else:
        y_names = [t for t in sorted(set([v for v in y]))]
    y_to_num = {p[1]: p[0] for p in enumerate(y_names)}

    plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1)  # Setup a 1x15 grid
    ax = plt.subplot(plot_grid[:, :-1])  # Use the left 14/15ths of the grid for the main plot

    marker = kwargs.get('marker', 's')

    # Everything not consumed above goes straight to ax.scatter.
    kwargs_pass_on = {k: v for k, v in kwargs.items() if k not in [
        'color', 'palette', 'color_range', 'size', 'size_range', 'size_scale', 'marker', 'x_order', 'y_order'
    ]}

    # Debug output left in by the author.
    print(x_names)
    print(y_names)
    print('here------------')
    ax.scatter(
        x=x1,
        y=y1,
        marker=marker,
        s=[value_to_size(v) for v in size],
        c=[value_to_color(v) for v in color],
        **kwargs_pass_on
    )
    ax.set_xticks([v for k, v in x_to_num.items()])
    ax.set_xticklabels([k for k in x_to_num], rotation=45, horizontalalignment='right')
    ax.set_yticks([v for k, v in y_to_num.items()])
    ax.set_yticklabels([k for k in y_to_num])
    # Minor gridlines between cells, major grid off.
    ax.grid(False, 'major')
    ax.grid(True, 'minor')
    ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
    ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
    ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
    ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
    ax.set_facecolor('#F1F1F1')

    # Add color legend on the right side of the plot
    if color_min < color_max:
        ax = plt.subplot(plot_grid[:, -1])  # Use the rightmost column of the plot

        col_x = [0] * len(palette)  # Fixed x coordinate for the bars
        bar_y = np.linspace(color_min, color_max, n_colors)  # y coordinates for each of the n_colors bars

        bar_height = bar_y[1] - bar_y[0]
        ax.barh(
            y=bar_y,
            width=[5] * len(palette),  # Make bars 5 units wide
            left=col_x,  # Make bars start at 0
            height=bar_height,
            color=palette,
            linewidth=0
        )
        ax.set_xlim(1, 2)  # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle
        ax.grid(False)  # Hide grid
        ax.set_facecolor('white')  # Make background white
        ax.set_xticks([])  # Remove horizontal ticks
        ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3))  # Show vertical ticks for min, middle and max
        ax.yaxis.tick_right()  # Show vertical ticks on the right
def corrplot(data, size_scale=500, marker='s'):
    """Melt *data* into long form and draw it with heatmap().

    NOTE(review): the scatter coordinates passed to heatmap() are the
    module-level ``x1``/``y1`` (only defined in a *later* cell), not the
    local ``x``/``y`` computed below -- running this cell before that one
    raises NameError.
    """
    corr = pd.melt(data.reset_index(), id_vars='index')
    print(corr)  # debug output left in by the author
    corr.columns = ['index', 'variable', 'value']
    x_names = [t for t in sorted(set([v for v in corr['index']]))]
    x_to_num = {p[1]: p[0] for p in enumerate(x_names)}
    x = [x_to_num[v] for v in corr['index']]  # NOTE(review): computed but unused
    y_names = [t for t in sorted(set([v for v in corr['index']]))]
    y_to_num = {p[1]: p[0] for p in enumerate(y_names)}
    y = [y_to_num[v] for v in corr['index']]  # NOTE(review): computed but unused
    heatmap(
        corr['index'], corr['value'], x1, y1,
        color=corr['value'], color_range=[0, 1],
        palette=sbn.diverging_palette(20, 220, n=256),
        size=corr['value'].abs(), size_range=[0, 1],
        marker=marker,
        x_order=data.columns,
        y_order=data.columns[::-1],
        size_scale=size_scale
    )
# NOTE(review): corrplot() dereferences the globals x1/y1 which are only
# assigned below -- this call fails with NameError on a clean run.
corrplot(df)
# Rebuild the melted frame at top level so x1/y1 exist as globals for the
# scatter cells below.
corr = pd.melt(df.reset_index(), id_vars='index')
print(corr)
x_names = [t for t in sorted(set([v for v in corr['index']]))]
x_to_num = {p[1]: p[0] for p in enumerate(x_names)}
x1 = [x_to_num[v] for v in corr['index']]
y_names = [t for t in sorted(set([v for v in corr['variable']]))]
y_to_num = {p[1]: p[0] for p in enumerate(y_names)}
y1 = [y_to_num[v] for v in corr['variable']]
def value_to_size(val):
    """Scale *val* to a marker size.

    NOTE(review): size_min, size_max and size_scale are free variables
    that are not defined at top level here, so calling this version
    raises NameError; it is shadowed by the redefinition a few cells
    further down before it is ever called.
    """
    if size_min == size_max:
        return 1 * size_scale
    else:
        val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01  # position of value in the input range, relative to the length of the input range
        val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
        return val_position * size_scale
value_names = [t for t in sorted(set([v for v in corr['value']]))]
# Copy the melted values into a plain list (value = list(corr['value'])
# would be equivalent).
value = []
for v in corr['value']:
    value.append(v)
for v in corr['value']:
    print(v)  # debug dump of every value
n_colors = 256  # Use 256 colors for the diverging color palette
palette = sbn.cubehelix_palette(n_colors)
# Map each value to a palette index via 256 evenly spaced bin edges.
# NOTE(review): np.digitize returns 256 for a value of exactly 1.0, which
# would index past the end of the 256-entry palette -- confirm the data
# never reaches 1.0. (linspace comes from the %pylab namespace.)
mapping = linspace(0, 1, 256)
c_index = np.digitize(value, mapping)
plot_colors = []
for i in c_index:
    plot_colors.append(palette[i])
s = np.array(value) * 4000  # marker areas proportional to overlap value
fig = figsize(10, 10)
plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1)  # Setup a 1x15 grid
ax = plt.subplot(plot_grid[:, :-1])  # Use the left 14/15ths of the grid for the main plot
ax.scatter(x1, y1, marker='s', s=s, c=plot_colors)
sbn.despine()
ax.grid(False, 'major')
ax.grid(True, 'minor', color='white')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
ax.set_facecolor((0, 0, 0))
# Put the matrix origin at top-left like a conventional heatmap.
plt.gca().invert_yaxis()
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
xlabel(r'$\lambda$ index')
ylabel(r'$\lambda$ index')
def value_to_size(val, value):
    """Map *val* to a marker size in [0, size_scale].

    BUG FIX: the second parameter was named ``vlaue`` (typo) and never
    used; the body silently read the module-level ``value`` list instead,
    so the function only worked when that global happened to exist. The
    parameter is now actually used. The only call site in this notebook
    passes the global positionally, so behaviour there is unchanged.

    NOTE(review): ``size`` below is a list of ones, so size_min always
    equals size_max and the function currently always returns the full
    size_scale (500); kept as-is to preserve behaviour.
    """
    size_scale = 500
    size = [1] * len(value)
    size_min, size_max = min(size), max(size)
    if size_min == size_max:
        return 1 * size_scale
    else:
        val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01  # position of value in the input range
        val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
        return val_position * size_scale
# NOTE(review): bare name display of heatmap2, which is only defined by
# the pasted module further down -- running cells top-to-bottom raises
# NameError here.
heatmap2
# Always 500 with the current value_to_size implementation (see above).
value_to_size(value[5], value)
# Third-party alternative: biokit's Corrplot.
# NOTE(review): this import rebinds the name `corrplot` (previously the
# local function defined above) to the biokit module.
from biokit.viz import corrplot
c = corrplot.Corrplot(df)
c.plot()
def plot(index, columns):
    """Pivot the global ``df`` on (index, columns) and render mean ratio
    (colour) plus group size (cell area) via heatmap2.

    NOTE(review): relies on the module-level ``df`` and on ``heatmap2``
    from the pasted module below; ``df`` as built in this notebook has no
    'bad_status' column, so this function looks copied from a different
    notebook -- confirm before use.
    """
    values = "bad_status"
    vmax = 0.10
    cellsize_vmax = 10000
    g_ratio = df.pivot_table(index=index, columns=columns, values=values, aggfunc="mean")
    g_size = df.pivot_table(index=index, columns=columns, values=values, aggfunc="size")
    # Percentage labels; empty string for missing cells.
    annot = np.vectorize(lambda x: "" if np.isnan(x) else "{:.1f}%".format(x * 100))(g_ratio)

    # adjust visual balance
    figsize = (g_ratio.shape[1] * 0.8, g_ratio.shape[0] * 0.8)
    cbar_width = 0.05 * 6.0 / figsize[0]

    f, ax = plt.subplots(1, 1, figsize=figsize)
    cbar_ax = f.add_axes([.91, 0.1, cbar_width, 0.8])

    heatmap2(g_ratio, ax=ax, cbar_ax=cbar_ax,
             vmax=vmax, cmap="PuRd", annot=annot, fmt="s", annot_kws={"fontsize": "small"},
             cellsize=g_size, cellsize_vmax=cellsize_vmax,
             square=True, ax_kws={"title": "{} x {}".format(index, columns)})
    plt.show()
"""
This script is created by modifying seaborn matrix.py
in https://github.com/mwaskom/seaborn, by Michael L. Waskom
"""
from __future__ import division
import itertools
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.patheffects as patheffects
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
import seaborn as sns
from seaborn import cm
from seaborn.axisgrid import Grid
from seaborn.utils import (despine, axis_ticklabels_overlap, relative_luminance, to_utf8)
from seaborn.external.six import string_types
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(to_utf8, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(to_utf8, i)) for i in index.values]
else:
return index.values
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not mask.index.equals(data.index) \
and mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper2(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
    def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
                 annot_kws, cellsize, cellsize_vmax,
                 cbar, cbar_kws,
                 xticklabels=True, yticklabels=True, mask=None, ax_kws=None, rect_kws=None):
        """Initialize the plotting object.

        Normalises the input into (a) a DataFrame with semantic labels and
        (b) a masked ndarray for matplotlib, then precomputes ticks,
        labels, colormap parameters, cell sizes and annotation data.
        """
        # We always want to have a DataFrame with semantic information
        # and an ndarray to pass to matplotlib
        if isinstance(data, pd.DataFrame):
            plot_data = data.values
        else:
            plot_data = np.asarray(data)
            data = pd.DataFrame(plot_data)

        # Validate the mask and convert to DataFrame
        mask = _matrix_mask(data, mask)

        # Masked cells are excluded from colormapping and range heuristics.
        plot_data = np.ma.masked_where(np.asarray(mask), plot_data)

        # Get good names for the rows and columns.
        # An int means "label every Nth tick"; True/False toggle labels.
        # (bool is an int subclass, so True lands in the first branch with
        # xtickevery == True == 1, which is numerically harmless.)
        xtickevery = 1
        if isinstance(xticklabels, int):
            xtickevery = xticklabels
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is True:
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is False:
            xticklabels = []

        ytickevery = 1
        if isinstance(yticklabels, int):
            ytickevery = yticklabels
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is True:
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is False:
            yticklabels = []

        # Get the positions and used label for the ticks
        nx, ny = data.T.shape

        if not len(xticklabels):
            self.xticks = []
            self.xticklabels = []
        elif isinstance(xticklabels, string_types) and xticklabels == "auto":
            # "auto" defers tick thinning to _auto_ticks at draw time.
            self.xticks = "auto"
            self.xticklabels = _index_to_ticklabels(data.columns)
        else:
            self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
                                                             xtickevery)

        if not len(yticklabels):
            self.yticks = []
            self.yticklabels = []
        elif isinstance(yticklabels, string_types) and yticklabels == "auto":
            self.yticks = "auto"
            self.yticklabels = _index_to_ticklabels(data.index)
        else:
            self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
                                                             ytickevery)

        # Get good names for the axis labels
        xlabel = _index_to_label(data.columns)
        ylabel = _index_to_label(data.index)
        self.xlabel = xlabel if xlabel is not None else ""
        self.ylabel = ylabel if ylabel is not None else ""

        # Determine good default values for the colormapping
        self._determine_cmap_params(plot_data, vmin, vmax,
                                    cmap, center, robust)

        # Determine good default values for cell size
        self._determine_cellsize_params(plot_data, cellsize, cellsize_vmax)

        # Sort out the annotations: bool toggles annotating with the data
        # itself; an array-like supplies the annotation text directly.
        if annot is None:
            annot = False
            annot_data = None
        elif isinstance(annot, bool):
            if annot:
                annot_data = plot_data
            else:
                annot_data = None
        else:
            try:
                annot_data = annot.values
            except AttributeError:
                annot_data = annot
            if annot.shape != plot_data.shape:
                raise ValueError('Data supplied to "annot" must be the same '
                                 'shape as the data to plot.')
            annot = True

        # Save other attributes to the object
        self.data = data
        self.plot_data = plot_data

        self.annot = annot
        self.annot_data = annot_data

        self.fmt = fmt
        self.annot_kws = {} if annot_kws is None else annot_kws
        #self.annot_kws.setdefault('color', "black")
        self.annot_kws.setdefault('ha', "center")
        self.annot_kws.setdefault('va', "center")
        self.cbar = cbar
        self.cbar_kws = {} if cbar_kws is None else cbar_kws
        self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
        self.ax_kws = {} if ax_kws is None else ax_kws
        self.rect_kws = {} if rect_kws is None else rect_kws
        # self.rect_kws.setdefault('edgecolor', "black")
    def _determine_cmap_params(self, plot_data, vmin, vmax,
                               cmap, center, robust):
        """Use some heuristics to set good defaults for colorbar and range."""
        # Ignore NaN cells when computing the data range.
        calc_data = plot_data.data[~np.isnan(plot_data.data)]
        if vmin is None:
            # robust mode clips outliers via the 2nd/98th percentiles
            vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
        if vmax is None:
            vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
        self.vmin, self.vmax = vmin, vmax

        # Choose default colormaps if not provided
        if cmap is None:
            if center is None:
                self.cmap = cm.rocket
            else:
                self.cmap = cm.icefire
        elif isinstance(cmap, string_types):
            # NOTE(review): mpl.cm.get_cmap is deprecated in Matplotlib >= 3.7
            # (use matplotlib.colormaps[...]) -- confirm the target version.
            self.cmap = mpl.cm.get_cmap(cmap)
        elif isinstance(cmap, list):
            self.cmap = mpl.colors.ListedColormap(cmap)
        else:
            self.cmap = cmap

        # Recenter a divergent colormap
        if center is not None:
            # Resample a symmetric window around `center` from the base map.
            vrange = max(vmax - center, center - vmin)
            normlize = mpl.colors.Normalize(center - vrange, center + vrange)
            cmin, cmax = normlize([vmin, vmax])
            cc = np.linspace(cmin, cmax, 256)
            self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
def _determine_cellsize_params(self, plot_data, cellsize, cellsize_vmax):
if cellsize is None:
self.cellsize = np.ones(plot_data.shape)
self.cellsize_vmax = 1.0
else:
if isinstance(cellsize, pd.DataFrame):
cellsize = cellsize.values
self.cellsize = cellsize
if cellsize_vmax is None:
cellsize_vmax = cellsize.max()
self.cellsize_vmax = cellsize_vmax
def _skip_ticks(self, labels, tickevery):
"""Return ticks and labels at evenly spaced intervals."""
n = len(labels)
if tickevery == 0:
ticks, labels = [], []
elif tickevery == 1:
ticks, labels = np.arange(n) + .5, labels
else:
start, end, step = 0, n, tickevery
ticks = np.arange(start, end, step) + .5
labels = labels[start:end:step]
return ticks, labels
def _auto_ticks(self, ax, labels, axis):
    """Determine ticks and ticklabels that minimize overlap.

    Estimates how many labels fit along the requested axis from the
    Axes' physical size (in inches) and the tick-label font size, then
    delegates to _skip_ticks to subsample accordingly.

    ax is the target Axes, labels the full label sequence, and axis
    selects 0 for the x-axis or 1 for the y-axis.
    """
    # Axes bounding box converted from display units to inches.
    transform = ax.figure.dpi_scale_trans.inverted()
    bbox = ax.get_window_extent().transformed(transform)
    size = [bbox.width, bbox.height][axis]
    axis = [ax.xaxis, ax.yaxis][axis]
    tick, = axis.set_ticks([0])
    # NOTE(review): `tick.label` is the old Matplotlib alias for
    # `tick.label1`; confirm it still exists on the Matplotlib in use.
    fontsize = tick.label.get_size()
    # Roughly one label per `fontsize` points of axis length (72 pt/inch).
    max_ticks = int(size // (fontsize / 72))
    if max_ticks < 1:
        # Axis too small to fit even a single label.
        return [], []
    tick_every = len(labels) // max_ticks + 1
    tick_every = 1 if tick_every == 0 else tick_every
    ticks, labels = self._skip_ticks(labels, tick_every)
    return ticks, labels
def plot(self, ax, cax):
    """Draw the heatmap on the provided Axes.

    Each unmasked cell is drawn as a centered square whose side length
    scales with ``self.cellsize`` (clipped to [0.1, 1.0] of a cell) and
    whose face color encodes the data value.  Optionally annotates each
    cell and attaches a colorbar on *cax*.
    """
    # Remove all the Axes spines
    #despine(ax=ax, left=True, bottom=True)
    # Draw the heatmap and annotate
    height, width = self.plot_data.shape
    xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
    data = self.plot_data.data
    cellsize = self.cellsize
    mask = self.plot_data.mask
    if not isinstance(mask, np.ndarray) and not mask:
        # Bug fix: `np.bool` was deprecated in NumPy 1.20 and removed in
        # 1.24 -- use the builtin `bool` dtype instead.
        mask = np.zeros(self.plot_data.shape, bool)
    annot_data = self.annot_data
    if not self.annot:
        annot_data = np.zeros(self.plot_data.shape)
    # Draw rectangles instead of using pcolormesh
    # Might be slower than original heatmap
    for x, y, m, val, s, an_val in zip(xpos.flat, ypos.flat, mask.flat, data.flat, cellsize.flat, annot_data.flat):
        if not m:
            vv = (val - self.vmin) / (self.vmax - self.vmin)
            size = np.clip(s / self.cellsize_vmax, 0.1, 1.0)
            color = self.cmap(vv)
            rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, **self.rect_kws)
            ax.add_patch(rect)
            if self.annot:
                annotation = ("{:" + self.fmt + "}").format(an_val)
                text = ax.text(x, y, annotation, **self.annot_kws)
                # (removed a stray debug `print(text)` left over from development)
                # add edge to text so it stays readable on any cell colour
                text_luminance = relative_luminance(text.get_color())
                text_edge_color = ".15" if text_luminance > .408 else "w"
                text.set_path_effects([mpl.patheffects.withStroke(linewidth=1, foreground=text_edge_color)])
    # Set the axis limits
    ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
    # Set other attributes
    ax.set(**self.ax_kws)
    if self.cbar:
        norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
        scalar_mappable = mpl.cm.ScalarMappable(cmap=self.cmap, norm=norm)
        scalar_mappable.set_array(self.plot_data.data)
        cb = ax.figure.colorbar(scalar_mappable, cax, ax, **self.cbar_kws)
        cb.outline.set_linewidth(0)
        # if kws.get('rasterized', False):
        #     cb.solids.set_rasterized(True)
    # Add row and column labels
    if isinstance(self.xticks, string_types) and self.xticks == "auto":
        xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
    else:
        xticks, xticklabels = self.xticks, self.xticklabels
    if isinstance(self.yticks, string_types) and self.yticks == "auto":
        yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
    else:
        yticks, yticklabels = self.yticks, self.yticklabels
    ax.set(xticks=xticks, yticks=yticks)
    xtl = ax.set_xticklabels(xticklabels)
    ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
    # Possibly rotate them if they overlap
    ax.figure.draw(ax.figure.canvas.get_renderer())
    if axis_ticklabels_overlap(xtl):
        plt.setp(xtl, rotation="vertical")
    if axis_ticklabels_overlap(ytl):
        plt.setp(ytl, rotation="horizontal")
    # Add the axis labels
    ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
    # Invert the y axis to show the plot in matrix form
    ax.invert_yaxis()
def heatmap2(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
             annot=None, fmt=".2g", annot_kws=None,
             cellsize=None, cellsize_vmax=None,
             cbar=True, cbar_kws=None, cbar_ax=None,
             square=False, xticklabels="auto", yticklabels="auto",
             mask=None, ax=None, ax_kws=None, rect_kws=None):
    """Draw a heatmap whose cell sizes encode a second variable.

    Thin wrapper around _HeatMapper2: builds the plotter, prepares the
    target Axes and delegates the drawing.  Returns the Axes drawn on.
    """
    # Initialize the plotter object
    plotter = _HeatMapper2(data, vmin, vmax, cmap, center, robust,
                           annot, fmt, annot_kws,
                           cellsize, cellsize_vmax,
                           cbar, cbar_kws, xticklabels,
                           yticklabels, mask, ax_kws, rect_kws)

    # Draw the plot and return the Axes
    target_ax = plt.gca() if ax is None else ax
    if square:
        target_ax.set_aspect("equal")
    # The rectangles themselves convey structure; the grid only clutters.
    target_ax.grid(False)
    plotter.plot(target_ax, cbar_ax)
    return target_ax
# Custom size-encoded heatmap of the lambda-overlap matrix.
# Bug fix: `figsize(10, 10)` was not a defined name -- create the figure
# via matplotlib's `figsize` argument instead.
fig = plt.figure(figsize=(10, 10))
ax = heatmap2(good, annot=True, fmt='.2f', cellsize=np.array(value), cellsize_vmax=1, annot_kws={"size": 13}, square=True, robust=True, cmap='PiYG')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
# Minor grid lines drawn between cells; major grid switched off.
ax.grid(False, 'major')
ax.grid(True, 'minor', color='black', alpha=0.3)
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
# Put the x labels on top, matrix style.
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
# Reference seaborn heatmap of the same matrix for comparison.
# Bug fix: `figsize(8, 8)` was not a defined name.
fig = plt.figure(figsize=(8, 8))
# NOTE(review): `sbn` must be the seaborn alias (`import seaborn as sbn`) --
# confirm the notebook's import cell (the header imports it as `sns`).
ax = sbn.heatmap(good, annot=True, fmt='.2f', linewidths=.3, annot_kws={"size": 14}, cmap=sbn.light_palette((210, 90, 60), input="husl"))
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
sbn.despine()
ax.grid(False, 'major')
ax.grid(True, 'minor', color='white')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
# Removed a leftover fragment pasted from _HeatMapper2.plot: it referenced
# undefined names (`x`, `y`, `annotation`, `self`) and called `ax.text()`
# with no arguments, so it could only raise NameError/TypeError.
```
| github_jupyter |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p tensorflow
```
# Model Zoo -- Convolutional Autoencoder with Deconvolutions
A convolutional autoencoder using deconvolutional layers that compresses 784-pixel (28x28) MNIST images down to a 7x7x4 (196 pixel) representation.
```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
##########################
### DATASET
##########################
mnist = input_data.read_data_sets("./", validation_size=0)
##########################
### SETTINGS
##########################
# Hyperparameters
learning_rate = 0.001   # Adam step size
training_epochs = 5     # full passes over the training set
batch_size = 128

# Architecture
hidden_size = 16        # NOTE(review): not referenced in the graph below
input_size = 784        # flattened 28x28 MNIST image
image_width = 28

# Other
print_interval = 200    # minibatches between progress prints
random_seed = 123       # fixed seed for reproducibility
##########################
### GRAPH DEFINITION
##########################
g = tf.Graph()
with g.as_default():
    tf.set_random_seed(random_seed)

    # Input data: flat 784-vector, reshaped to NHWC for the conv layers.
    tf_x = tf.placeholder(tf.float32, [None, input_size], name='inputs')
    input_layer = tf.reshape(tf_x, shape=[-1, image_width, image_width, 1])

    ###########
    # Encoder
    ###########

    # 28x28x1 => 28x28x8
    conv1 = tf.layers.conv2d(input_layer, filters=8, kernel_size=(3, 3),
                             strides=(1, 1), padding='same',
                             activation=tf.nn.relu)
    # 28x28x8 => 14x14x8
    maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2),
                                       strides=(2, 2), padding='same')
    # 14x14x8 => 14x14x4
    conv2 = tf.layers.conv2d(maxpool1, filters=4, kernel_size=(3, 3),
                             strides=(1, 1), padding='same',
                             activation=tf.nn.relu)
    # 14x14x4 => 7x7x4 -- the 196-value bottleneck representation
    encode = tf.layers.max_pooling2d(conv2, pool_size=(2, 2),
                                     strides=(2, 2), padding='same',
                                     name='encoding')

    ###########
    # Decoder
    ###########

    # 7x7x4 => 14x14x8
    deconv1 = tf.layers.conv2d_transpose(encode, filters=8,
                                         kernel_size=(3, 3), strides=(2, 2),
                                         padding='same',
                                         activation=tf.nn.relu)
    # 14x14x8 => 28x28x8
    deconv2 = tf.layers.conv2d_transpose(deconv1, filters=8,
                                         kernel_size=(3, 3), strides=(2, 2),
                                         padding='same',
                                         activation=tf.nn.relu)
    # 28x28x8 => 28x28x1
    logits = tf.layers.conv2d(deconv2, filters=1, kernel_size=(3,3),
                              strides=(1, 1), padding='same',
                              activation=None)
    # Sigmoid output used only at inference time; training works on logits.
    decode = tf.nn.sigmoid(logits, name='decoding')

    ##################
    # Loss & Optimizer
    ##################

    # Per-pixel reconstruction loss: the targets are the input pixel
    # intensities themselves (autoencoder).
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=input_layer,
                                                   logits=logits)
    cost = tf.reduce_mean(loss, name='cost')
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train = optimizer.minimize(cost, name='train')

    # Saver to save session for reuse
    saver = tf.train.Saver()
import numpy as np
##########################
### TRAINING & EVALUATION
##########################
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    np.random.seed(random_seed)  # random seed for mnist iterator

    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = mnist.train.num_examples // batch_size

        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Fetch the op/tensor by graph name; only images are fed --
            # the labels (batch_y) are unused by the autoencoder.
            _, c = sess.run(['train', 'cost:0'], feed_dict={'inputs:0': batch_x})
            avg_cost += c

            if not i % print_interval:
                print("Minibatch: %03d | Cost: %.3f" % (i + 1, c))

        # Mean minibatch cost over the epoch.
        print("Epoch: %03d | AvgCost: %.3f" % (epoch + 1, avg_cost / (i + 1)))

    # Persist the trained weights for the visualization cell below.
    saver.save(sess, save_path='./autoencoder.ckpt')
%matplotlib inline
import matplotlib.pyplot as plt
##########################
### VISUALIZATION
##########################
n_images = 15
fig, axes = plt.subplots(nrows=2, ncols=n_images, sharex=True,
                         sharey=True, figsize=(20, 2.5))
test_images = mnist.test.images[:n_images]

# Reload the trained weights and reconstruct the test images.
with tf.Session(graph=g) as sess:
    saver.restore(sess, save_path='./autoencoder.ckpt')
    decoded = sess.run('decoding:0', feed_dict={'inputs:0': test_images})

# Top row: originals; bottom row: reconstructions.
for i in range(n_images):
    for ax, img in zip(axes, [test_images, decoded]):
        ax[i].imshow(img[i].reshape((image_width, image_width)), cmap='binary')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/suyash091/EEG-MULTIPLE-CHANNEL/blob/master/1%20channel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Mindwave | 1 channel | 512 sampling rate
```
```
from google.colab import drive
drive.mount('/content/drive')
from os.path import isfile
import zipfile
import numpy as np
import random
dataset_path = './data/MW.zip'
def get_dataset_file():
    """Return an open binary handle to the MindBigData MW zip archive.

    Downloads the archive on first use; later calls reuse the cached copy
    at the module-level `dataset_path`.
    """
    if not isfile(dataset_path):
        # Bug fix: the original did `import urllib` but then called the
        # bare name `urlretrieve`, which `import urllib` does not define.
        from urllib.request import urlretrieve
        origin = (
            'http://www.mindbigdata.com/opendb/MindBigData-MW-v1.0.zip'
        )
        print('Downloading data from %s' % origin)
        urlretrieve(origin, dataset_path)
    return open(dataset_path, 'rb')
def get_datasets():
    """Parse the MW.txt dump into (train, test) numpy arrays.

    Each output row is laid out as [n_samples, 1024 signal slots
    (zero-padded), digit code]: index 0 stores how many samples were
    actually recorded, the last entry stores the event's label.  Events
    are shuffled with a fixed seed and split 60000 / remainder.
    """
    f = get_dataset_file()
    zf = zipfile.ZipFile(f)
    data = [ line for line in zf.open('MW.txt')]
    entire_dataset = []
    # Single-channel layout: [length, 1024 signal values, label].
    current_event = np.zeros(1024 * 1 + 2)
    print('Reading data file')
    i = 0
    for l in data:
        #print(str(l))
        # NOTE: this rebinds `data` to the signal field of the line; the
        # loop is unaffected because its iterator was already taken.
        ids, event, device, channel, code, size, data = l.decode("utf-8").split('\t')
        signals = np.array([float(val) for val in data.split(',')])
        # Copy at most the first 1024 samples into this channel's slot.
        current_event[1+ i*1024:1+ i*1024 + min(len(signals), 1024)] = signals[:1024]
        i += 1
        if i == 1: # we assume all channels from an event are in sequence
            current_event[-1] = int(code)
            current_event[0] = min(len(signals), 1024)
            entire_dataset.append(current_event)
            current_event = np.zeros(1024 * 1 + 2)
            i = 0
    random.seed(111) # deterministic
    random.shuffle(entire_dataset)
    entire_dataset = np.array(entire_dataset)
    return entire_dataset[:60000], entire_dataset[60000:]
def split_into_subsequences(data, n_sequences, length):
    """Cut each recording into *n_sequences* windows of *length* samples.

    Rows of *data* are laid out as [n_samples, signal..., label]; windows
    are spaced evenly across the recorded samples, and four signal
    segments at a stride of 512 are copied per window, with the label
    appended in the last slot.

    NOTE(review): the output width ``length*14+1`` and the 512-stride
    segment reads look inherited from a multi-channel (EPOC) variant of
    this notebook -- confirm against the 1-channel MW layout.
    """
    output = np.zeros((data.shape[0] * n_sequences, length * 14 + 1))
    for i in range(data.shape[0]):
        if n_sequences == 1:
            step = 0
        else:
            # Bug fix: the original float division produced float slice
            # indices, which numpy rejects -- use an integer step.
            step = int((data[i, 0] - length) // (n_sequences - 1))
        for j in range(n_sequences):
            base = j * step
            row = i * n_sequences + j
            output[row, :length] = data[i, base:base + length]
            output[row, length:length * 2] = data[i, base + 512:base + 512 + length]
            output[row, length * 2:length * 3] = data[i, base + 512 * 2:base + 512 * 2 + length]
            output[row, length * 3:length * 4] = data[i, base + 512 * 3:base + 512 * 3 + length]
            output[row, -1] = data[i, -1]
    return output
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from urllib.request import urlretrieve
#from sklearn.gaussian_process import GaussianProcess
(25/2050)*3586  # NOTE(review): stray expression with no effect -- leftover scratch work
train, test = get_datasets()
print(train.shape)
print(test.shape)
train[0].shape
len(train[-1])
#train = train[:int(len(train)/3),:]
#test = test[:int(len(test)/3), :]
#train[train[:,-1] >= 0,-1] = 0
#test[test[:,-1] >= 0,-1] = 0
#train[:,-1] = train[:,-1] + 1
#test[:,-1] = test[:,-1] + 1
# Labels (digit codes) live in the last column of each row.
train_target = train[:,-1]
test_target = test[:,-1]
DECISIONBOUNDARY = 0  # NOTE(review): appears unused in the cells below -- confirm
#FastFourierTransformation apply to the sets using the 4 sensors
# (here only one channel: the j-loop runs once; the amplitude spectrum
# replaces the raw signal in place while the length slot [0] and label
# slot [-1] are copied through unchanged)
train_f = np.zeros(train.shape)
for i in range(len(train_f)):
    length = train[i][0]
    train_f[i][0] = train[i][0]
    train_f[i][-1] = train[i][-1]
    for j in range(1):
        train_f[int(i)][int(1+1024*j):int(1+1024*j+length)] = np.abs(np.fft.fft(train[int(i)][int(1+1024*j):int(1+1024*j+length)]))
test_f = np.zeros(test.shape)
for i in range(len(test_f)):
    length = test[i,0]
    test_f[i,0] = test[i,0]
    test_f[i,-1] = test[i,-1]
    for j in range(1):
        test_f[i][int(1+1024*j):int(1+1024*j+length)] = np.abs(np.fft.fft(test[i][int(1+1024*j):int(1+1024*j+length)]))
#PCA with 100 components (the old comment said 25, but n_components=100)
pca = PCA(n_components = 100)
train_principal = pca.fit_transform(train_f)
test_principal = pca.transform(test_f)
test_principal.shape
#KNN: sweep k = 1..19 and record test accuracy.  NOTE: despite its name,
#`erreur` stores percent-correct, not error.
erreur = []
x = np.arange(1,20,1)
for n in (x):
    # train set
    neigh = KNeighborsClassifier(n_neighbors=n)
    neigh.fit(train_principal, train[:,-1])
    #test set: predict one sample at a time and count matches
    total_correct = 0
    for i in range(test_principal.shape[0]):
        if neigh.predict(test_principal[i].reshape(1, -1))[0] == test[i,-1]:
            total_correct += 1
    erreur.append(float(total_correct) / test_principal.shape[0] * 100)
    print('Percentage correct %d:' % n, float(total_correct) / test_principal.shape[0] * 100)
#Import models from scikit learn module:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold #For K-fold cross validation
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics
import numpy as np
from sklearn.metrics import roc_curve, auc, precision_score, confusion_matrix, explained_variance_score, max_error, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso
#model = RandomForestClassifier(n_estimators=100)
#model.fit(train_principal,train[:,-1])
# NOTE(review): the two lines above are commented out, so `model` below
# refers to whatever a previously executed cell defined -- running this
# cell in isolation raises NameError.
predictions = model.predict(test_principal)
# NOTE: sklearn's metric convention is (y_true, y_pred); the reversed
# order below still runs but transposes the confusion matrix / precision.
prc=precision_score(predictions,test[:,-1], average=None)
cfm=confusion_matrix(predictions,test[:,-1])
accuracy = metrics.accuracy_score(predictions,test[:,-1])
print(prc,cfm,accuracy)
#print(explained_variance_score(predictions,test))
#print(max_error(predictions,test))
#print(mean_absolute_error(predictions, test, multioutput='raw_values'))
#print(mean_squared_error(predictions, test, multioutput='raw_values'))
# Logistic regression on the 100 PCA features.
model = LogisticRegression()
model.fit(train_principal,train[:,-1])
predictions = model.predict(test_principal)
# NOTE: argument order is (predictions, truth); sklearn expects (y_true, y_pred).
prc=precision_score(predictions,test[:,-1], average=None)
cfm=confusion_matrix(predictions,test[:,-1])
accuracy = metrics.accuracy_score(predictions,test[:,-1])
print(prc,cfm,accuracy)
model = LinearRegression()
#model.fit(train_principal,train[:,-1])
#predictions = model.predict(test_principal)
#prc=precision_score(predictions,test[:,-1], average=None)
# NOTE(review): fit/predict are commented out, so the regression metrics
# below are computed on the *previous* cell's predictions, not on the
# LinearRegression model constructed above.
print(explained_variance_score(predictions,test[:,-1]))
print(max_error(predictions,test[:,-1]))
print(mean_absolute_error(predictions, test[:,-1], multioutput='raw_values'))
print(mean_squared_error(predictions, test[:,-1], multioutput='raw_values'))
# Random-forest regression, treating the digit code as a continuous target.
model = RandomForestRegressor()
model.fit(train_principal,train[:,-1])
predictions = model.predict(test_principal)
# NOTE: metric argument order is (predictions, truth); sklearn expects
# (y_true, y_pred) -- symmetric metrics are unaffected, max_error is too.
print(explained_variance_score(predictions,test[:,-1]))
print(max_error(predictions,test[:,-1]))
print(mean_absolute_error(predictions, test[:,-1], multioutput='raw_values'))
print(mean_squared_error(predictions, test[:,-1], multioutput='raw_values'))
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
!pip install scipy
from scipy import signal
# Fully-connected classifier over the 100 PCA components, 11 output classes.
# NOTE(review): sparse_categorical_crossentropy requires labels in [0, 11);
# the MW codes include -1 ("no digit") -- confirm the labels were shifted.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(1024, input_dim=100, activation='relu'))
model.add(tf.keras.layers.Dense(4096, activation='relu'))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Dense(4096, activation='relu'))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Dense(4096, activation='relu'))
model.add(tf.keras.layers.Dense(11, activation='softmax'))

alpha = 2e-3        # SGD learning rate
batch_size = 256
num_epochs = 3000   # NOTE(review): very large for plain SGD -- likely exploratory

model.compile(
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=alpha, ),
    loss=tf.keras.losses.sparse_categorical_crossentropy,
    metrics=['sparse_categorical_accuracy', 'accuracy']
)
History = model.fit(
    x = train_principal,
    y = train[:,-1],
    batch_size = batch_size,
    epochs=num_epochs,
    #validation_data = (x_valid, y_valid),
    #callbacks = [checkPointer,tensorBoard]
)
predictions = model.predict(test_principal)
```
| github_jupyter |
# 📝 Exercise M3.02
The goal is to find the best set of hyperparameters which maximize the
generalization performance on a training set.
Here again we limit the size of the training set to make the computation
run faster. Feel free to increase the `train_size` value if your computer
is powerful enough.
```
import numpy as np
import pandas as pd

# Load the adult census data and separate the target from the features;
# "education-num" is dropped alongside the target column.
adult_census = pd.read_csv("../datasets/adult-census.csv")

target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])

from sklearn.model_selection import train_test_split

# Keep only 20% of the rows for training so the search runs quickly.
data_train, data_test, target_train, target_test = train_test_split(
    data, target, train_size=0.2, random_state=42)
```
In this exercise, we will progressively define the classification pipeline
and later tune its hyperparameters.
Our pipeline should:
* preprocess the categorical columns using a `OneHotEncoder` and use a
`StandardScaler` to normalize the numerical data.
* use a `LogisticRegression` as a predictive model.
Start by defining the columns and the preprocessing pipelines to be applied
on each group of columns.
```
from sklearn.compose import make_column_selector as selector
# Write your code here.
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
# Write your code here.
```
Subsequently, create a `ColumnTransformer` to redirect the specific columns
to a preprocessing pipeline.
```
from sklearn.compose import ColumnTransformer
# Write your code here.
```
Assemble the final pipeline by combining the above preprocessor
with a logistic regression classifier. Force the maximum number of
iterations to `10_000` to ensure that the model will converge.
```
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
# Write your code here.
```
Use `RandomizedSearchCV` with `n_iter=20` to find the best set of
hyperparameters by tuning the following parameters of the `model`:
- the parameter `C` of the `LogisticRegression` with values ranging from
0.001 to 10. You can use a log-uniform distribution
(i.e. `scipy.stats.loguniform`);
- the parameter `with_mean` of the `StandardScaler` with possible values
`True` or `False`;
- the parameter `with_std` of the `StandardScaler` with possible values
`True` or `False`.
Once the computation has completed, print the best combination of parameters
stored in the `best_params_` attribute.
```
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform
# Write your code here.
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import math
import json
%matplotlib inline
# read in the json files
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
def id_mapper(df , column):
    """Encode the distinct values of *column* as 1-based integer codes.

    INPUT:
    df - (DataFrame)
    column - (str) name of the column whose values should be enumerated

    OUTPUT:
    coded_dict - (dict) maps each distinct value, in order of first
    appearance, to a sequential code starting at 1
    """
    coded_dict = {}
    next_code = 1
    for value in df[column]:
        if value not in coded_dict:
            coded_dict[value] = next_code
            next_code += 1
    return coded_dict
def offer_type_mapper(df=portfolio):
    """Relate each offer id to an integer-coded offer type and duration.

    INPUT: df - (DataFrame) - portfolio as default dataframe

    OUTPUT:
    offer_type - (DataFrame) - 'id', integer-coded 'offer_type' and
    'duration' columns
    coded_dict - (dict) - offer-type name -> integer code (see id_mapper)
    """
    type_codes = id_mapper(df, 'offer_type')
    encoded = df.replace({"offer_type": type_codes})
    return encoded[['id' , 'offer_type' , 'duration']], type_codes
def arrange_events(df , offer_df):
    """Rearrange the transcript by merging each offer's rows, creating a
    time-valued column per event type and attaching the offer metadata.

    INPUT:
    df - (DataFrame) - raw transcript (person / event / value / time)
    offer_df - (DataFrame) - offer id -> encoded type and duration

    OUTPUT:
    df - (DataFrame) - one row per (person, time) event, with 'user id',
    'offer id' ('no offer' for plain transactions), amount, reward,
    'offer type' and 'duration' columns
    """
    # read dictionary from 'value' feature and create columns
    df = pd.concat([df.drop(['value'], axis=1), df['value'].apply(pd.Series)], axis=1)
    # merge offer id and offer_id columns (the raw data uses both spellings)
    df['offer id'] = df['offer id'].combine_first(df['offer_id'])
    df = df.drop(columns = ['offer_id'])
    # split into three dataFrames and then merge rows with transaction and offer completed in the same time
    df1 = df[df['event'] == 'offer completed'][['person' , 'event' , 'time' , 'offer id' , 'reward']]
    df2 = df[df['event'] == 'transaction'][['person' , 'time' , 'amount']]
    df3 = df[df['event'] != 'offer completed']
    df3 = df3[df3['event'] != 'transaction'][['person' , 'event' , 'time' , 'offer id']]
    # merge the two dataFrames on time
    df_trans_completed = pd.merge(df1, df2, how='outer', on=['person', 'time'])
    # merge with main dataFrames
    df = pd.merge(df3, df_trans_completed, how='outer', on=['person', 'time' , 'event' , 'offer id'])
    # create columns of type of event with the value of time
    df = pd.concat([df, df.pivot_table(values='time', index=df.index, columns='event', aggfunc='first')], axis=1, sort=False)
    # fill NaN values in the offer id feature as 'no offer' to keep track of the transactions without an offer
    df['offer id'] = df['offer id'].fillna(value = 'no offer')
    df = df.rename(columns={'person':'user id'})
    # merge with offer_type dataframe
    df = pd.merge(df, offer_df, how='outer', on=['offer id'])
    df.rename(columns={'offer_type': 'offer type'} , inplace = True)
    return df
def fill_amount(df):
    """Attach transaction amounts to type-2 offer-received rows.

    For each 'offer received' row of offer type 2, finds the first
    offer-less transaction inside the offer's validity window, copies its
    amount onto the received row and drops the consumed transaction row.
    Mutates and returns *df*.

    NOTE(review): which concrete offer kind code 2 denotes depends on the
    portfolio row order fed to offer_type_mapper -- confirm.
    """
    dict_values = {}
    index_lst = []
    # All type-2 'offer received' rows for this user.
    a = df.loc[(df['event'] == 'offer received') & (df['offer type'] == 2)]
    for i in range(a.shape[0]):
        # Offer-less transactions within [received, received + duration].
        b = df.loc[(df['time'] >= a['time'].values[i]) & (df['time'] <= (a['time'].values[i] + a['duration'].values[i])) & (df['offer id'] == 'no offer')]
        if (b.shape[0] != 0):
            # Only the first matching transaction is consumed.
            index_lst.append(b.index[0])
            c = b['amount'].to_list()[0]
            dict_values.update({a.index.to_list()[i]: c})
        else:
            dict_values.update({a.index.to_list()[i]: np.nan})
    df["amount"].fillna(dict_values, inplace=True)
    df.drop(index=index_lst, axis=0 , inplace=True)
    return df
def offer_merge(df):
    """For one user, collapse the rows of each offer into a single row.

    Repeated receipts of the same offer are separated with a running flag
    before averaging, so each receipt remains an independent record.

    INPUT: df - (DataFrame) - one user's rows of the modified transcript

    OUTPUT: (df, user_id) - one row per offer occurrence, plus the user id
    """
    offers_received_lst = df['offer id'].unique().tolist()
    total_offers_received = df['offer received'].count()  # NOTE(review): unused below
    temp_df = df.head(0)
    user_id = df['user id'].unique()[0]
    for offer in offers_received_lst:
        #create data frame of an offer
        offer_df = df[df['offer id'] == offer].copy()
        # check if the same offer has been receved more than one time if so, create flags to treat each offer independently.
        if offer_df['offer received'].count() > 1:
            cter = 0
            flag = []
            #create list to flag each offer: bump the counter on every new
            #'offer received' row so following rows join the latest receipt
            for index, row in offer_df.iterrows():
                if not np.isnan(row['offer received']):
                    cter+=1
                    flag.append(cter)
                else:
                    flag.append(cter)
            offer_df['flag'] = flag
            offer_df = offer_df.groupby(['flag' , 'offer id']).mean().reset_index().drop(columns='flag')
        else:
            offer_df = offer_df.groupby('offer id').mean().reset_index()
        # NOTE(review): DataFrame.append was removed in pandas 2.0 --
        # this requires pandas < 2 (or a port to pd.concat).
        temp_df = temp_df.append(offer_df , sort=False)
    temp_df = temp_df.reset_index()
    temp_df = temp_df.drop(columns=['index'])
    df = temp_df
    return df , user_id
def check_completed_offers(df , user_id):
    """Drop offers whose completion was not influenced by viewing them.

    Removes rows completed before being viewed, or completed without ever
    being viewed.  Also back-fills reward/amount with 0, tags offer-less
    transactions as offer type 4 and fills the user id.

    INPUT: df - (DataFrame), user_id - this user's id
    OUTPUT: df - (DataFrame) - rearranged data
    """
    # fill NaN values with 0 for offers that were not completed
    df[['reward' , 'amount']] = df[['reward' , 'amount']].fillna(value = 0)
    # add column with the type of offer
    # df = pd.merge(df, map_offer_type, how='left', left_on=['offer id'] , right_on=['id'])
    # fill with offer type 4, for transactions that are not related with an offer
    df['offer type'] = df['offer type'].fillna(value = 4)
    df['user id'] = df['user id'].fillna(value = user_id)
    # check if an offer was completed before it was viewed or if it was not viewed, if so, drop it (the offer did not influenciate the transaction)
    # NOTE(review): df.loc[row] relies on the default RangeIndex produced
    # by offer_merge's reset_index -- confirm if callers change the index.
    for row in range(len(df)):
        if df.loc[row]['offer viewed'] > df.loc[row]['offer completed']:
            df = df.drop([row])
        elif np.isnan(df.loc[row]['offer viewed']) and not np.isnan(df.loc[row]['offer completed']):
            df = df.drop([row])
        else:
            pass
    return df
def get_events(df):
    """Aggregate each user's offer-influenced activity per offer type.

    For every user in the module-level `profile`, rearranges transactions
    influenced by an offer and collects, per offer type: mean transaction
    amount, offers received, offers viewed and offers completed.
    note: it takes some time to process (iterates users one by one)

    INPUT: df - (DataFrame) - rearranged transcript (see arrange_events)

    OUTPUT:
    amount_lst (lst) - dicts of mean amount spent per offer type per user
    offers_lst (lst) - dicts of offers received per type per user
    offers_view_lst (lst) - dicts of offers viewed per type per user
    offers_completed_lst (lst) - dicts of offers completed per type per user
    """
    user_id_lst = profile['id'].tolist()
    amount_lst = []
    offers_lst = []
    offers_view_lst = []
    offers_completed_lst = []
    for user in user_id_lst:
        user_events = df[df['user id'] == user]
        user_fill_amount = fill_amount(user_events)
        user_events, user_id = offer_merge(user_fill_amount)
        user_events = check_completed_offers (user_events , user_id)
        amount = {'user id' : user}
        offers = {'user id' : user}
        offers_view = {'user id' : user}
        offers_completed = {'user id' : user}
        # Group the user's merged offers by type and summarise.
        amount.update(user_events.groupby('offer type').mean()['amount'].to_dict())
        offers.update(user_events.groupby('offer type').count()['offer id'].to_dict())
        offers_view.update(user_events.groupby('offer type').count()['offer viewed'].to_dict())
        offers_completed.update(user_events.groupby('offer type').count()['offer completed'].to_dict())
        amount_lst.append(amount)
        offers_lst.append(offers)
        offers_view_lst.append(offers_view)
        offers_completed_lst.append(offers_completed)
    return amount_lst , offers_lst , offers_view_lst , offers_completed_lst
def df_from_lst (lst):
    """Build a DataFrame from a list of dicts, dropping offer-type 4.

    INPUT: lst (list) - dicts keyed by 'user id' plus offer-type codes
    OUTPUT: (DataFrame) - column ``4`` removed, missing entries filled
    with 0
    """
    frame = pd.DataFrame(lst)
    frame = frame.drop(columns=4)
    return frame.fillna(0)
```
# main
```
# --- main pipeline: build per-user, per-offer-type feature tables ---
# split gender into dummies columns
profile_mod = pd.concat([profile , pd.get_dummies(profile['gender'])],axis=1)
profile_mod.drop(['gender' , 'became_member_on'],axis=1, inplace=True)
#map offer type
map_offer_type, dict_offer_type = offer_type_mapper(portfolio)
#transform days into hours so durations are comparable with transcript times
map_offer_type['duration'] = map_offer_type['duration'] * 24
map_offer_type.rename(columns={'id': 'offer id'} , inplace = True)
map_offer_type
# arrange transcript df
arrange_transcript = arrange_events(transcript , map_offer_type)
#note: this will take some time to execute, you can grab a coffee ;)
amount_lst , offers_lst , offers_view_lst , offers_completed_lst = get_events(arrange_transcript)
# Amount data: one column per offer type with the user's mean spend
amount_type = pd.DataFrame(amount_lst)
amount_type.rename(columns={1: 'type 1', 2: 'type 2', 3: 'type 3' , 4: 'type 4'} , inplace = True)
user_offers = df_from_lst (offers_lst)
offers_viewed = df_from_lst (offers_view_lst)
offers_completed = df_from_lst (offers_completed_lst)
user_offers.rename(columns={1: 'offers type 1', 2: 'offers type 2', 3: 'offers type 3'} , inplace = True)
offers_viewed.rename(columns={1: 'viewed type 1', 2: 'viewed type 2', 3: 'viewed type 3'} , inplace = True)
offers_completed.rename(columns={1: 'completed type 1', 2: 'completed type 2', 3: 'completed type 3'} , inplace = True)
# merge data frames
amount_offer = pd.merge(amount_type, user_offers, how='inner' , on="user id")
amount_offer = pd.merge(amount_offer, offers_viewed, how='inner' , on="user id")
amount_offer = pd.merge(amount_offer, offers_completed, how='inner' , on="user id")
amount_offer
#split into 4 datasets, one per offer type
amount_type_1 = amount_offer[['user id' , 'type 1' , 'offers type 1' , 'viewed type 1' , 'completed type 1']].copy() # 'dif 1'
amount_type_2 = amount_offer[['user id' , 'type 2' , 'offers type 2' , 'viewed type 2' , 'completed type 2']].copy()
amount_type_3 = amount_offer[['user id' , 'type 3' , 'offers type 3' , 'viewed type 3' , 'completed type 3']].copy()
amount_type_4 = amount_offer[['user id' , 'type 4']].copy()
# join the demographic profile onto each per-type table
amount_type_1 = pd.merge(profile_mod, amount_type_1, how='inner' , left_on="id" , right_on="user id")
amount_type_1.drop(['id'],axis=1, inplace=True)
amount_type_2 = pd.merge(profile_mod, amount_type_2, how='inner' , left_on="id" , right_on="user id")
amount_type_2.drop(['id'],axis=1, inplace=True)
amount_type_3 = pd.merge(profile_mod, amount_type_3, how='inner' , left_on="id" , right_on="user id")
amount_type_3.drop(['id'],axis=1, inplace=True)
amount_type_4 = pd.merge(profile_mod, amount_type_4, how='inner' , left_on="id" , right_on="user id")
amount_type_4.drop(['id'],axis=1, inplace=True)
# clear NaN for each data Set, users that did not recive that type of offer
amount_type_1.dropna(axis=0 , inplace=True)
amount_type_1.drop(amount_type_1[amount_type_1['viewed type 1'] == 0].index , inplace=True)
amount_type_2.dropna(axis=0 , inplace=True)
amount_type_2.drop(amount_type_2[amount_type_2['viewed type 2'] == 0].index , inplace=True)
amount_type_3.dropna(axis=0 , inplace=True)
amount_type_3.drop(amount_type_3[amount_type_3['viewed type 3'] == 0].index , inplace=True)
amount_type_4.dropna(axis=0 , inplace=True)
# save dataFrames to CSV files
amount_type_1.to_csv('data/amount_type_1.csv' , index=False)
amount_type_2.to_csv('data/amount_type_2.csv' , index=False)
amount_type_3.to_csv('data/amount_type_3.csv' , index=False)
amount_type_4.to_csv('data/amount_type_4.csv' , index=False)
```
| github_jupyter |
# Object and Scene Detection using Amazon Rekognition
This notebook provides a walkthrough of [object detection API](https://docs.aws.amazon.com/rekognition/latest/dg/labels.html) in Amazon Rekognition to identify objects.
```
# AWS / SageMaker setup for the Rekognition walkthrough.
import boto3
from IPython.display import HTML, display, Image as IImage
from PIL import Image, ImageDraw, ImageFont
import time
import os
import sagemaker
import boto3  # NOTE(review): duplicate of the import above; harmless but redundant
# Session, execution role and default S3 bucket come from the hosting
# SageMaker notebook environment.
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
bucket = sagemaker_session.default_bucket()
region = boto3.Session().region_name
# Service clients used throughout the notebook.
rekognition = boto3.client('rekognition')
s3 = boto3.client('s3')
# Local scratch directory for images downloaded from S3.
!mkdir -p ./tmp
temp_folder = 'tmp/'
```
# Detect Objects in Image
```
# S3 key of the demo image; display it inline through a short-lived
# presigned GET URL (avoids making the object public).
imageName = 'content-moderation/media/cars.png'
display(IImage(url=s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': imageName})))
```
# Call Rekognition to Detect Objects in the Image
https://docs.aws.amazon.com/rekognition/latest/dg/API_DetectLabels.html
```
# Run object/scene detection on the image directly from S3. The response
# lists labels with confidence scores and, for some labels, per-instance
# bounding boxes.
detectLabelsResponse = rekognition.detect_labels(
    Image={
        'S3Object': {
            'Bucket': bucket,
            'Name': imageName,
        }
    }
)
```
# Review the Raw JSON Response from Rekognition
Show JSON response returned by Rekognition Labels API (Object Detection).
In the JSON response below, you will see Label, detected instances, confidence score and additional information.
```
display(detectLabelsResponse)
```
# Show Bounding Boxes Around Recognized Objects
```
def drawBoundingBoxes(sourceImage, boxes):
    """Download *sourceImage* from S3, draw labelled bounding boxes on it, and display it.

    sourceImage -- S3 key of the image (bucket taken from the notebook-level
                   ``bucket`` variable).
    boxes       -- iterable of (label, bounding_box) pairs, where bounding_box
                   is a Rekognition-style dict of relative Left/Top/Width/Height.
    """
    # Palette cycled across boxes (white, white, orange, green).
    palette = ((255,255,255),(255,255,255),(76,182,252),(52,194,123))
    thickness = 3

    # Fetch the image into the local scratch folder before editing it.
    localPath = temp_folder + os.path.basename(sourceImage)
    s3.download_file(bucket, sourceImage, localPath)

    image = Image.open(localPath)
    draw = ImageDraw.Draw(image)
    width, height = image.size

    for index, (label, bb) in enumerate(boxes):
        color = palette[index % len(palette)]
        # Rekognition coordinates are fractions of the image size.
        left = int(bb['Left'] * width)
        top = int(bb['Top'] * height)
        right = int(bb['Left'] * width + bb['Width'] * width)
        bottom = int(bb['Top'] * height + bb['Height'] * height)
        draw.text((left, top), label, color)
        # Draw several nested 1px rectangles to fake a thicker outline.
        for offset in range(thickness):
            draw.rectangle((left - offset, top - offset, right + offset, bottom + offset),
                           outline=color)

    # Keep the on-disk format consistent with the file extension.
    saveFormat = 'JPEG' if sourceImage.lower().endswith(('jpg', 'jpeg')) else 'PNG'
    image.save(localPath, format=saveFormat)
    display(image)
# Collect (label name, bounding box) pairs for every detected instance.
# Scene-level labels with no 'Instances' entries contribute nothing.
boxes = []
objects = detectLabelsResponse['Labels']
for obj in objects:
    for einstance in obj["Instances"]:
        boxes.append ((obj['Name'], einstance['BoundingBox']))
drawBoundingBoxes(imageName, boxes)
```
# Display List of Detected Objects
```
flaggedObjects = ["Car"]
for label in detectLabelsResponse["Labels"]:
if(label["Name"] in flaggedObjects):
print("Detected object:")
print("- {} (Confidence: {})".format(label["Name"], label["Confidence"]))
print(" - Parents: {}".format(label["Parents"]))
```
# Recognize Objects in Video
Object recognition in video is an async operation.
https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html.
- First we start a label detection job which returns a Job Id.
- We can then call `get_label_detection` to get the job status and after job is complete, we can get object metadata.
- In production use cases, you would usually use StepFunction or SNS topic to get notified when job is complete.
```
# S3 key of the demo video, plus accumulators for the HTML report built later.
videoName = 'content-moderation/media/GrandTour720.mp4'
strDetail = 'Objects detected in video<br>=======================================<br>'
strOverall = 'Objects in the overall video:<br>=======================================<br>'
# Show video in a player, streaming it through a presigned URL.
s3VideoUrl = s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': videoName})
videoTag = "<video controls='controls' autoplay width='640' height='360' name='Video' src='{0}'></video>".format(s3VideoUrl)
videoui = "<table><tr><td style='vertical-align: top'>{}</td></tr></table>".format(videoTag)
display(HTML(videoui))
```
# Call Rekognition to Start a Job for Object Detection
### Additional (Optional) Request Attributes
ClientRequestToken:
https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-ClientRequestToken
JobTag:
https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-JobTag
MinConfidence:
https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-MinConfidence
NotificationChannel:
https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-NotificationChannel
```
# Start video label recognition job (asynchronous — returns immediately
# with a JobId that we poll below).
startLabelDetection = rekognition.start_label_detection(
    Video={
        'S3Object': {
            'Bucket': bucket,
            'Name': videoName,
        }
    },
)

labelsJobId = startLabelDetection['JobId']
display("Job Id: {0}".format(labelsJobId))
```
# Wait for Object Detection Job to Complete
```
# Wait for object detection job to complete by polling every 5 seconds.
# In production use cases, you would usually use StepFunction or SNS topic
# to get notified when the job is complete instead of busy-polling.
getObjectDetection = rekognition.get_label_detection(
    JobId=labelsJobId,
    SortBy='TIMESTAMP'
)

while(getObjectDetection['JobStatus'] == 'IN_PROGRESS'):
    time.sleep(5)
    print('.', end='')
    getObjectDetection = rekognition.get_label_detection(
        JobId=labelsJobId,
        SortBy='TIMESTAMP')

# Final status: SUCCEEDED or FAILED.
display(getObjectDetection['JobStatus'])
```
# Review Raw JSON Response from Rekognition
* Show JSON response returned by Rekognition Object Detection API.
* In the JSON response below, you will see list of detected objects and activities.
* For each detected object, you will see the `Timestamp` of the frame within the video.
```
display(getObjectDetection)
```
# Display Recognized Objects in the Video
Display timestamps and objects detected at that time.
```
# Summarize the video results: per-frame detail for flagged labels plus
# per-label totals across the whole video.
flaggedObjectsInVideo = ["Car"]
theObjects = {}

# Objects detected in each frame
for obj in getObjectDetection['Labels']:
    ts = obj ["Timestamp"]  # milliseconds from the start of the video
    cconfidence = obj['Label']["Confidence"]
    oname = obj['Label']["Name"]
    if(oname in flaggedObjectsInVideo):
        print("Found flagged object at {} ms: {} (Confidence: {})".format(ts, oname, round(cconfidence,2)))
        strDetail = strDetail + "At {} ms: {} (Confidence: {})<br>".format(ts, oname, round(cconfidence,2))
    # NOTE(review): indentation reconstructed — counting here applies to every
    # label, not only flagged ones; confirm against the original notebook.
    if oname in theObjects:
        cojb = theObjects[oname]
        theObjects[oname] = {"Name" : oname, "Count": 1+cojb["Count"]}
    else:
        theObjects[oname] = {"Name" : oname, "Count": 1}

# Unique objects detected in video
for theObject in theObjects:
    strOverall = strOverall + "Name: {}, Count: {}<br>".format(theObject, theObjects[theObject]["Count"])

# Display results
display(HTML(strOverall))
listui = "<table><tr><td style='vertical-align: top'>{}</td></tr></table>".format(strDetail)
display(HTML(listui))
```
# Worker Safety with Amazon Rekognition
You can use Amazon Rekognition to detect if certain objects are not present in an image or video. For example, you can perform a worker safety audit by reviewing images/video of a construction site and detecting whether any workers are without a safety hat.
```
imageName = "content-moderation/media/hat-detection.png"
display(IImage(url=s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': imageName})))
```
# Call Amazon Rekognition to Detect Objects in the Image
```
# Detect objects in the worker-safety image directly from S3.
detectLabelsResponse = rekognition.detect_labels(
    Image={
        'S3Object': {
            'Bucket': bucket,
            'Name': imageName,
        }
    }
)
```
# Display Rekognition Response
```
display(detectLabelsResponse)
```
# Show Bounding Boxes Around Recognized Objects
```
def drawBoundingBoxes(sourceImage, boxes):
    """Render labelled bounding boxes on an S3-hosted image and display the result.

    sourceImage -- S3 key of the image inside the notebook's ``bucket``.
    boxes       -- sequence of (label, box) tuples; each box is a Rekognition
                   BoundingBox dict with relative Left/Top/Width/Height values.
    """
    colors = ((255,255,255),(255,255,255),(76,182,252),(52,194,123))
    imageLocation = temp_folder + os.path.basename(sourceImage)

    # Work on a local copy of the image.
    s3.download_file(bucket, sourceImage, imageLocation)
    bbImage = Image.open(imageLocation)
    width, height = bbImage.size
    draw = ImageDraw.Draw(bbImage)

    col = 0
    for label, bbox in boxes:
        # Convert relative coordinates into pixel corner positions.
        x1 = int(bbox['Left'] * width)
        y1 = int(bbox['Top'] * height)
        x2 = int(bbox['Left'] * width + bbox['Width'] * width)
        y2 = int(bbox['Top'] * height + bbox['Height'] * height)

        draw.text((x1, y1), label, colors[col])
        # Nested rectangles emulate a 3-pixel-wide outline.
        for l in range(3):
            draw.rectangle((x1 - l, y1 - l, x2 + l, y2 + l), outline=colors[col])
        col = (col + 1) % len(colors)

    # Preserve JPEGs as JPEG; everything else is written back as PNG.
    if sourceImage.lower().endswith(('jpg', 'jpeg')):
        bbImage.save(imageLocation, format='JPEG')
    else:
        bbImage.save(imageLocation, format='PNG')
    display(bbImage)
# Draw a box for every detected object instance in the image.
boxes = []
objects = detectLabelsResponse['Labels']
for obj in objects:
    for einstance in obj["Instances"]:
        boxes.append ((obj['Name'], einstance['BoundingBox']))
drawBoundingBoxes(imageName, boxes)
def matchPersonsAndHats(personsList, hardhatsList):
    """Greedily pair detected hard hats with detected persons by box overlap.

    A hat matches a person when their axis-aligned bounding rectangles
    intersect; each dict must carry a 'BoundingBoxCoordinates' entry with
    the corner coordinates produced by getBoundingBoxCoordinates.

    :param personsList: list of person detection dicts
    :param hardhatsList: list of hard-hat detection dicts
    :returns: tuple (personsWithHats, unmatchedPersons, unmatchedHats);
              personsWithHats is a list of {'Person': ..., 'Hardhat': ...}
              pairs. The input lists are not modified.
    """
    # Work on copies so the caller's lists are left untouched (the original
    # code copied element-by-element; list() does the same in one step).
    persons = list(personsList)
    hardhats = list(hardhatsList)
    personsWithHats = []

    def _overlaps(hat, person):
        # Two rectangles intersect unless one lies entirely to the left/right
        # of the other, or entirely above/below it. (x1 = left edge,
        # x2 = right edge, y1 = top edge, y4 = bottom edge.)
        h = hat['BoundingBoxCoordinates']
        p = person['BoundingBoxCoordinates']
        return not (h['x2'] < p['x1'] or h['x1'] > p['x2']
                    or h['y4'] < p['y1'] or h['y1'] > p['y4'])

    # Greedy first-fit matching, replacing the original's fragile
    # h/matched index arithmetic: each hat claims the first remaining
    # person it overlaps, and both are removed from further consideration.
    for hardhat in list(hardhats):
        for person in persons:
            if _overlaps(hardhat, person):
                personsWithHats.append({'Person' : person, 'Hardhat' : hardhat})
                persons.remove(person)
                hardhats.remove(hardhat)
                break

    return (personsWithHats, persons, hardhats)
def getBoundingBoxCoordinates(boundingBox, imageWidth, imageHeight):
    """Convert a relative Rekognition BoundingBox into absolute pixel corners.

    :param boundingBox: dict with relative 'Left', 'Top', 'Width', 'Height'
                        values (fractions of the image size, as returned by
                        Rekognition).
    :param imageWidth: image width in pixels
    :param imageHeight: image height in pixels
    :returns: dict of the four corner coordinates, clockwise from top-left:
              (x1,y1) top-left, (x2,y2) top-right, (x3,y3) bottom-right,
              (x4,y4) bottom-left.
    """
    boxWidth = boundingBox['Width']*imageWidth
    boxHeight = boundingBox['Height']*imageHeight
    x1 = boundingBox['Left']*imageWidth
    # BUG FIX: 'Top' is a fraction of the image *height*; the original code
    # multiplied by imageWidth, skewing every vertical coordinate on
    # non-square images.
    y1 = boundingBox['Top']*imageHeight
    x2 = x1 + boxWidth
    y2 = y1
    x3 = x2
    y3 = y1 + boxHeight
    x4 = x1
    y4 = y3
    return({'x1': x1, 'y1' : y1, 'x2' : x2, 'y2' : y2, 'x3' : x3, 'y3' : y3, 'x4' : x4, 'y4' : y4})
def getPersonsAndHardhats(labelsResponse, imageWidth, imageHeight):
    """Split a DetectLabels response into person and hard-hat detections.

    Each returned entry keeps the raw relative BoundingBox, its absolute
    pixel corner coordinates, and the instance confidence.

    :param labelsResponse: DetectLabels response dict
    :param imageWidth: image width in pixels
    :param imageHeight: image height in pixels
    :returns: tuple (persons, hardhats) of lists of detection dicts
    """
    def _as_detection(instance):
        # Package one label instance with both coordinate representations.
        return {'BoundingBox' : instance['BoundingBox'],
                'BoundingBoxCoordinates' : getBoundingBoxCoordinates(instance['BoundingBox'], imageWidth, imageHeight),
                'Confidence' : instance['Confidence']}

    persons = []
    hardhats = []
    for label in labelsResponse['Labels']:
        if 'Instances' not in label:
            continue
        name = label['Name']
        if name == 'Person':
            persons.extend(_as_detection(inst) for inst in label['Instances'])
        elif name in ('Hardhat', 'Helmet'):
            hardhats.extend(_as_detection(inst) for inst in label['Instances'])
    return (persons, hardhats)
# Load the image via boto3's resource API to measure its pixel dimensions.
s3Resource = boto3.resource('s3')
bucket = s3Resource.Bucket(bucket)  # NOTE(review): rebinds 'bucket' from str to a Bucket object for later cells
iojb = bucket.Object(imageName)
response = iojb.get()
file_stream = response['Body']
im = Image.open(file_stream)
imageWidth, imageHeight = im.size

# Pair up detected persons and hard hats, then report the counts.
persons, hardhats = getPersonsAndHardhats(detectLabelsResponse, imageWidth, imageHeight)
personsWithHats, personsWithoutHats, hatsWihoutPerson = matchPersonsAndHats(persons, hardhats)

personsWithHatsCount = len(personsWithHats)
personsWithoutHatsCount = len(personsWithoutHats)
hatsWihoutPersonCount = len(hatsWihoutPerson)

outputMessage = "Person(s): {}".format(personsWithHatsCount+personsWithoutHatsCount)
outputMessage = outputMessage + "\nPerson(s) With Safety Hat: {}\nPerson(s) Without Safety Hat: {}".format(personsWithHatsCount, personsWithoutHatsCount)
print(outputMessage)
```
# Congratulations!
You have successfully used Amazon Rekognition to identify specific objects in images and videos.
# References
- https://docs.aws.amazon.com/rekognition/latest/dg/API_DetectLabels.html
- https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html
- https://docs.aws.amazon.com/rekognition/latest/dg/API_GetLabelDetection.html
# Release Resources
```
%%html
<p><b>Shutting down your kernel for this notebook to release resources.</b></p>
<button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
<script>
try {
els = document.getElementsByClassName("sm-command-button");
els[0].click();
}
catch(err) {
// NoOp
}
</script>
%%javascript
try {
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
}
catch(err) {
// NoOp
}
```
| github_jupyter |
# EGM722 - Week 5 Practical: Vector and raster operations using python
## Overview
Up to now, we have worked with either vector data or raster data, but we haven't really used them together. In this week's practical, we'll learn how we can combine these two data types, and see some examples of different analyses, such as zonal statistics or sampling raster data, that we can automate using python.
## Objectives
- learn how to use `rasterstats` to perform zonal statistics
- learn how to handle exceptions using try...except
- rasterize polygon data using `rasterio`
- learn how to mask and select (index) rasters using vector data
- see additional plotting examples using matplotlib
## Data provided
In the data\_files folder, you should have the following:
- LCM2015_Aggregate_100m.tif
- NI_DEM.tif
## 1. Getting started
In this practical, we'll look at a number of different GIS tasks related to working with both raster and vector data in python, as well as a few different python and programming concepts. To get started, run the cell below.
```
# Interactive plotting backend plus the raster/vector stack used this week.
%matplotlib notebook

import numpy as np
import rasterio as rio
import geopandas as gpd
import matplotlib.pyplot as plt
from rasterstats import zonal_stats

plt.rcParams.update({'font.size': 22}) # update the font size for our plots to be size 22
```
## 2. Zonal statistics
In GIS, [_zonal statistics_](https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/how-zonal-statistics-works.htm) is a process whereby you calculate statistics for the pixels of a raster in different groups, or zones, defined by properties in another dataset. In this example, we're going to use the Northern Ireland County border dataset from Week 2, along with a re-classified version of the Northern Ireland [Land Cover Map](https://catalogue.ceh.ac.uk/documents/47f053a0-e34f-4534-a843-76f0a0998a2f) 2015[<sup id="fn1-back">1</sup>](#fn1 "footnote 1").
The Land Cover Map tells, for each pixel, what type of land cover is associated with a location - that is, whether it's woodland (and what kind of woodland), grassland, urban or built-up areas, and so on. For our re-classified version of the dataset, we're working with the aggregate class data, re-sampled to 100m resolution from the original 25m resolution.
The raster data type is _unsigned integer_ with a _bitdepth_ of 8 bits - that is, it has a range of possible values from 0 to 255. Even though it has this range of possible values, we only use 10 (11) of them:
| Raster value | Aggregate class name |
| :------------|:---------------------------|
| 0 | No Data |
| 1 | Broadleaf woodland |
| 2 | Coniferous woodland |
| 3 | Arable |
| 4 | Improved grassland |
| 5 | Semi-natural grassland |
| 6 | Mountain, heath, bog |
| 7 | Saltwater |
| 8 | Freshwater |
| 9 | Coastal |
| 10 | Built-up areas and gardens |
In this part of the practical, we'll try to work out the percentage of the entire country that is covered by each of these different landcovers, as well as each of the different counties. To start, we'll load the `LCM2015_Aggregate_100m.tif` raster, as well as the counties shapefile from Week 2:
```
# open the land cover raster and read the data
with rio.open('data_files/LCM2015_Aggregate_100m.tif') as dataset:
    xmin, ymin, xmax, ymax = dataset.bounds  # extent in the raster's CRS
    crs = dataset.crs
    landcover = dataset.read(1)  # band 1: aggregate landcover class codes (0-10)
    affine_tfm = dataset.transform  # pixel -> world coordinate transform

# now, load the county dataset from the week 2 folder, re-projected onto the
# raster's CRS so the two datasets line up.
counties = gpd.read_file('../Week2/data_files/Counties.shp').to_crs(crs)
```
Next, we'll define a function that takes an array, and returns a __dict__ object containing the count (number of pixels) for each of the unique values in the array:
```python
def count_unique(array, nodata=0):
'''
Count the unique elements of an array.
:param array: Input array
:param nodata: nodata value to ignore in the counting
:returns count_dict: a dictionary of unique values and counts
'''
count_dict = {}
for val in np.unique(array):
if val == nodata:
continue
count_dict[str(val)] = np.count_nonzero(array == val)
return count_dict
```
Here, we have two input parameters: the first, `array`, is our array (or raster data). The next, `nodata`, is the value of the array that we should ignore. We then define an empty __dict__ (`count_dict = {}`).
With [`numpy.unique()`](https://numpy.org/doc/stable/reference/generated/numpy.unique.html), we get an array containing the unique values of the input array. Note that this works for data like this raster, where we have a limited number of pre-defined values. For something like a digital elevation model, which represents continuous floating-point values, we wouldn't want to use this approach to bin the data.
Next, for each of the different unique values `val`, we find all of the locations in `array` that have that value (`array == val`). Note that this is actually a boolean array, with values of either `True` where `array == val`, and `False` where `array != val`. [`numpy.count_nonzero()`](https://numpy.org/doc/stable/reference/generated/numpy.count_nonzero.html) the counts the number of non-zero (in this case, `True`) values in the array - that is, this:
```python
np.count_nonzero(array == val)
```
tells us the number of pixels in `array` that are equal to `val`. We then assign this to our dictionary with a key that is a __str__ representation of the value, before returning our `count_dict` variable at the end of the function.
Run the cell below to define the function and run it on our `landcover` raster.
```
def count_unique(array, nodata=0):
    '''
    Count the unique elements of an array.

    :param array: Input array
    :param nodata: nodata value to ignore in the counting
    :returns count_dict: a dictionary mapping each unique value (as a string)
        to the number of cells holding that value
    '''
    # numpy.unique enumerates the distinct cell values; the boolean
    # comparison plus count_nonzero counts the matching cells for each one.
    return {str(value): np.count_nonzero(array == value)
            for value in np.unique(array) if value != nodata}
# Tabulate how many 100 m pixels fall in each landcover class (class 0 = nodata).
unique_landcover = count_unique(landcover)
print(unique_landcover)
```
So this provides us with a __dict__ object with keys corresponding to each of the unique values (1-10).
<span style="color:#009fdf;font-size:1.1em;font-weight:bold">Can you work out the percentage area of Northern Ireland that is covered by each of the 10 landcover classes?</span>
In the following cell, we use [`rasterstats.zonal_stats()`](https://pythonhosted.org/rasterstats/manual.html#zonal-statistics) with our `counties` and `landcover` datasets to do the same exercise as above (counting unique pixel values). Rather than counting the pixels in the entire raster, however, we want to count the number of pixels with each land cover value that fall within a specific area defined by the features in the `counties` dataset:
```
# Count landcover pixels per county polygon; categorical=True returns a dict
# of {pixel value: count} per feature, in the same order as 'counties' rows.
county_stats = zonal_stats(counties, landcover, affine=affine_tfm, categorical=True, nodata=0)
print(county_stats[0])
```
## 3. The zip built-in
This isn't a very readable result, though. If we want to interpret the results for each county, we have to know what land cover name corresponds to each of the values in the raster. One way that we could do this is by writing a function that re-names each of the keys in the __dict__. This example shows one way we could do this: the function takes the original __dict__ object (_dict_in_), as well as a list of the 'old' keys (_old_names_), and the corresponding 'new' keys (_new_names_).
```
def rename_dict(dict_in, old_names, new_names):
    '''
    Rename the keys of a dictionary, given a list of old and new keynames

    :param dict_in: the dictionary to rename
    :param old_names: a list of old keys
    :param new_names: a list of new key names
    :returns dict_out: a dictionary with the keys re-named
    '''
    # zip() pairs each new key with its old counterpart; looking up a
    # missing old key raises KeyError, just as direct indexing would.
    return {new: dict_in[old] for new, old in zip(new_names, old_names)}
```
For this function, we're also making use of the built-in function `zip()` ([documentation](https://docs.python.org/3.8/library/functions.html#zip)). In Python 3, `zip()` returns an __iterator__ object that combines elements from each of the iterable objects passed as arguments. As an example:
```
# zip() pairs elements from each sequence positionally.
x = [1, 2, 3, 4]
y = ['a', 'b', 'c', 'd']
list(zip(x, y))
```
So, with `zip(x, y)`, each of the elements of `x` is paired with the corresponding element from `y`. If `x` and `y` are different lengths, `zip(x, y)` will only use up to the shorter of the two:
```
# With unequal lengths, zip() stops at the end of the shorter input.
x = [1, 2, 3]
list(zip(x, y))
```
Let's see what happens when we run our function `rename_dict()` using the stats for our first county (County Tyrone - remember that the output from zonal_stats will correspond to the rows of our input vector data):
```
# zonal_stats keys are the float pixel values 1.0-10.0; pair them with the
# aggregate class names from the table above (same order).
old_names = [float(i) for i in range(1, 11)]
new_names = ['Broadleaf woodland', 'Coniferous woodland', 'Arable', 'Improved grassland',
             'Semi-natural grassland', 'Mountain, heath, bog', 'Saltwater', 'Freshwater',
             'Coastal', 'Built-up areas and gardens']
rename_dict(county_stats[0], old_names, new_names)
```
Have a look at the _keys_ for `county_stats` - you should notice that there are no pixels corresponding to landcover class 7 (Saltwater), which makes sense given that County Tyrone is an inland county:
```
# Inspect which landcover values actually occur in the first county.
print(county_stats[0].keys())
print(county_stats[0])
```
To run this for each of our counties, we could run some checks to make sure that we only try to access keys that exist in `dict_in`. For example, we could add an `if` statement to the function:
```python
def rename_dict(dict_in, old_names, new_names):
dict_out = {}
for new, old in zip(new_names, old_names):
if old in dict_in.keys():
dict_out[new] = dict_in[old]
else:
continue
return dict_out
```
But, this is also an example of an exception that isn't necessarily something that requires us to stop executing our program. We don't expect each landcover type to be present in each county, so we don't want our program to stop as soon as it finds out that one of the counties doesn't have a particular landcover type.
## 4. Handling Exceptions with try ... except
Python provides a way to handle these kind of exceptions: the [try...except](https://realpython.com/python-exceptions/#the-try-and-except-block-handling-exceptions) block:
```python
try:
# run some code
except:
# run this if the try block causes an exception
```
In general, it's [not recommended](https://www.python.org/dev/peps/pep-0008/#programming-recommendations) to just have a bare `except:` clause, as this will make it harder to interrupt a program. In our specific case, we only want the interpreter to ignore `KeyError` exceptions - if there are other problems, we still need to know about those:
```
def rename_dict(dict_in, old_names, new_names):
    '''
    Rename the keys of a dictionary, given a list of old and new keynames

    :param dict_in: the dictionary to rename
    :param old_names: a list of old keys
    :param new_names: a list of new key names
    :returns dict_out: a dictionary with the keys re-named
    '''
    dict_out = {}
    for old, new in zip(old_names, new_names):
        # Old keys missing from dict_in are silently skipped (EAFP).
        try:
            value = dict_in[old]
        except KeyError:
            continue
        dict_out[new] = value
    return dict_out
```
Notice how for each pair of names, we try to assign the value corresponding to `old` in `dict_in`. If `old` is not a valid key for `dict_in`, we just move onto the next one. Now, let's run this new function on `county_stats[0]` again:
```
rename_dict(county_stats[0], old_names, new_names)
```
We'll do one last thing before moving on here. Just like with the __dict__ outputs of `zonal_stats()`, the __list__ of __dict__ objects isn't very readable. Let's create a new __dict__ object that takes the county names as keys, and returns the re-named __dict__ objects for each:
```
renamed_list = [rename_dict(d, old_names, new_names) for d in county_stats] # create a list of renamed dict objects
nice_names = [n.title() for n in counties.CountyName] # title-case the county names for use as keys
stats_dict = dict(zip(nice_names, renamed_list)) # county name -> renamed stats dict

print(stats_dict['Tyrone'])
print(stats_dict['Antrim'])
```
Depending on how we're using the data, it might be easier to keep the output of `zonal_stats()` as-is, rather than using these long, complicated keys. For visualization and readability purposes, though, it helps to be able to easily and quickly understand what the outputs actually represent.
<span style="color:#009fdf;font-size:1.1em;font-weight:bold">What is the total area (in km<sup>2</sup>) covered by "Mountain, heath, bog" in County Down?</span>
## 5. Rasterizing vector data using rasterio
`rasterstats` provides a nice tool for quickly and easily extracting zonal statistics from a raster using vector data. Sometimes, though, we might want to _rasterize_ our vector data - for example, in order to mask our raster data, or to be able to select pixels. To do this, we can use the [`rasterio.features`](https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html) module:
```
import rasterio.features # we have imported rasterio as rio, so this will be rio.features (and rasterio.features)
```
`rasterio.features` has a number of different methods, but the one we are interested in here is `rasterize()`:
```
rio.features.rasterize(
shapes,
out_shape=None,
fill=0,
out=None,
transform=Affine(1.0, 0.0, 0.0,
0.0, 1.0, 0.0),
all_touched=False,
merge_alg=<MergeAlg.replace: 'REPLACE'>,
default_value=1,
dtype=None,
)
Docstring:
Return an image array with input geometries burned in.
Warnings will be raised for any invalid or empty geometries, and
an exception will be raised if there are no valid shapes
to rasterize.
Parameters
----------
shapes : iterable of (`geometry`, `value`) pairs or iterable over
geometries. The `geometry` can either be an object that
implements the geo interface or GeoJSON-like object. If no
`value` is provided the `default_value` will be used. If `value`
is `None` the `fill` value will be used.
out_shape : tuple or list with 2 integers
Shape of output numpy ndarray.
fill : int or float, optional
Used as fill value for all areas not covered by input
geometries.
...
```
Here, we pass an __iterable__ (__list__, __tuple__, __array__, etc.) that contains (__geometry__, __value__) pairs. __value__ determines the pixel values in the output raster that the __geometry__ overlaps. If we don't provide a __value__, it takes the `default_value` or the `fill` value.
So, to create a rasterized version of our county outlines, we could do the following:
```python
shapes = list(zip(counties['geometry'], counties['COUNTY_ID']))
county_mask = rio.features.rasterize(shapes=shapes, fill=0,
out_shape=landcover.shape, transform=affine_tfm)
```
The first line uses `zip()` and `list()` to create a list of (__geometry__, __value__) pairs, and the second line actually creates the rasterized array, `county_mask`. Note that in the call to `rasterio.features.rasterize()`, we have to set the output shape (`out_shape`) of the raster, as well as the `transform` - that is, how we go from pixel coordinates in the array to real-world coordinates. Since we want to use this rasterized output with our `landcover`, we use the `shape` of the `landcover` raster, as well as its `transform` (`affine_tfm`) - that way, the outputs will line up as we expect. Run the cell below to see what the output looks like:
```
# Burn the county polygons into a raster aligned with 'landcover': each pixel
# takes the COUNTY_ID of the polygon covering it, or 0 (fill) outside.
shapes = list(zip(counties['geometry'], counties['COUNTY_ID']))
county_mask = rio.features.rasterize(shapes=shapes, fill=0,
                                     out_shape=landcover.shape, transform=affine_tfm)

plt.figure()
plt.imshow(county_mask) # visualize the rasterized output
```
As you can see, this provides us with an __array__ whose values correspond to the `COUNTY_ID` of the county feature at that location (check the `counties` __GeoDataFrame__ again to see which county corresponds to which ID). In the next section, we'll see how we can use arrays like this to investigate our data further.
## 6. Masking and indexing rasters
So far, we've seen how we can index an array (or a list, a tuple, ...) using simple indexing (e.g., `myList[0]`) or _slicing_ (e.g., `myList[2:4]`). `numpy` arrays, however, can [actually be indexed](https://numpy.org/doc/stable/reference/arrays.indexing.html) using other arrays of type `bool` (the elements of the array are boolean (`True`/`False`) values). In this section, we'll see how we can use this, along with our rasterized vectors, to select and investigate values from a raster using boolean indexing.
To start, we'll open our dem raster - note that this raster has the same georeferencing information as our landcover raster, so we don't have to load all of that information, just the raster band:
```
# The DEM shares the landcover raster's georeferencing, so only the band is needed.
with rio.open('data_files/NI_DEM.tif') as dataset:
    dem = dataset.read(1)
```
From the previous section, we have an array with values corresponding each of the counties of Northern Ireland. Using `numpy`, we can use this array to select elements of other rasters by creating a _mask_, or a boolean array - that is, an array with values of `True` and `False`. For example, we can create a mask corresponding to County Antrim (`COUNTY_ID=1`) like this:
```python
county_antrim = county_mask == 1
```
Let's see what this mask looks like:
```
# Boolean mask: True where the rasterized county id is 1 (County Antrim).
county_antrim = county_mask == 1

plt.figure()
plt.imshow(county_antrim)
```
We can also combine expressions using functions like [`np.logical_and()`](https://numpy.org/doc/stable/reference/generated/numpy.logical_and.html) or [`np.logical_or()`](https://numpy.org/doc/stable/reference/generated/numpy.logical_or.html). If we wanted to create a mask corresponding to both County Antrim and County Down, we could do the following:
```
# Combine masks: True for pixels in County Down (id 3) or County Antrim (id 1).
antrim_and_down = np.logical_or(county_mask == 3, county_mask == 1)

plt.figure()
plt.imshow(antrim_and_down)
```
We could then find the mean elevation of these two counties by indexing, or selecting, pixels from `dem` using our mask:
```
# Boolean indexing pulls the DEM values for the masked pixels only.
ad_elevation = dem[antrim_and_down]
print('Mean elevation: {:.2f} m'.format(ad_elevation.mean()))
```
Now let's say we wanted to investigate the two types of woodland we have, broadleaf and conifer. One thing we might want to look at is the area-elevation distribution of each type. To do this, we first have to select the pixels from the DEM that correspond to the broadleaf woodlands, and all of the pixels corresponding to conifer woodlands:
```
# Select DEM pixels by landcover class using boolean masks.
broad_els = dem[landcover == 1] # get all dem values where landcover = 1 (broadleaf woodland)
conif_els = dem[landcover == 2] # get all dem values where landcover = 2 (coniferous woodland)
```
Now, we have two different arrays, `broad_els` and `conif_els`, each corresponding to the DEM pixel values of each landcover type. We can plot a histogram of these arrays using [`plt.hist()`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.hist.html), but this will only tell us the number of pixels - for the area-elevation distribution, we have to convert the pixel counts into areas by multiplying with the pixel area (100 m x 100 m).
First, though, we can use `numpy.histogram()`, along with an array representing our elevation bins, to produce a count of the number of pixels with an elevation that falls within each bin. Let's try elevations ranging from 0 to 600 meters, with a spacing of 5 meters:
```
# Bin the elevations, then convert pixel counts to areas (each pixel is 100 m x 100 m).
el_bins = np.arange(0, 600, 5) # create an array of values ranging from 0 to 600, spaced by 5.

broad_count, _ = np.histogram(broad_els, el_bins) # bin the broadleaf elevations using the elevation bins
conif_count, _ = np.histogram(conif_els, el_bins) # bin the conifer elevations using the elevation bins

broad_area = broad_count * 100 * 100 # convert the pixel counts to an area by multiplying by the pixel size in x, y
conif_area = conif_count * 100 * 100
```
Finally, we can plot the area-elevation distribution for each land cover type using [`matplotlib.pyplot.bar()`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.bar.html):
```
fig, ax = plt.subplots(1, 1, figsize=(8, 8)) # create a new figure and axes object

# plot the area-elevation distributions using matplotlib.pyplot.bar(), converting from sq m to sq km;
# bars are left-aligned on each bin edge ('edge') and 5 m wide to match the bin spacing.
_ = ax.bar(el_bins[:-1], broad_area / 1e6, align='edge', width=5, alpha=0.8, label='Broadleaf Woodland')
_ = ax.bar(el_bins[:-1], conif_area / 1e6, align='edge', width=5, alpha=0.8, label='Conifer Woodland')

ax.set_xlim(0, 550) # set the x limits of the plot
ax.set_ylim(0, 30) # set the y limits of the plot
ax.set_xlabel('Elevation (m)') # add an x label
ax.set_ylabel('Area (km$^2$)') # add a y label
ax.legend() # add a legend
```
From this, we can clearly see that Conifer woodlands tend to be found at much higher elevations than Broadleaf woodlands, and at a much larger range of elevations (0-500 m, compared to 0-250 m or so). With these samples (`broad_els`, `conif_els`), we can also calculate statistics for each of these samples using `numpy` functions such as `np.mean()`, `np.median()`, `np.std()`, and so on.
<span style="color:#009fdf;font-size:1.1em;font-weight:bold">Of the 10 different landcover types shown here, which one has the highest mean elevation? What about the largest spread in elevation values?</span>
## Next steps
That's all for this practical. In lieu of an additional exercise this week, spend some time working on your project - are there concepts or examples from this practical that you can incorporate into your project?
### Footnotes
[<sup id="fn1">1</sup>](#fn1-back)Rowland, C.S.; Morton, R.D.; Carrasco, L.; McShane, G.; O'Neil, A.W.; Wood, C.M. (2017). Land Cover Map 2015 (25m raster, N. Ireland). NERC Environmental Information Data Centre. [doi:10.5285/47f053a0-e34f-4534-a843-76f0a0998a2f](https://doi.org/10.5285/47f053a0-e34f-4534-a843-76f0a0998a2f)</span>
| github_jupyter |
# 5. Statistical Packages in Python for Mathematicians
Statisticians use the following packages in Python:
- Data creation: `random`
- Data analysis/manipulation: `pandas`, `scikit-learn`
- Statistical functions: `scipy.stats`
- Statistical data visualization: `matplotlib`, `seaborn`
- Statistical data exploration: `statsmodels`
## Table of Contents
- Random
- Scipy Statistics
- Seaborn
- Statistical Models
- Python vs. R
Next week? Choose among:
- Machine Learning 2/Deep Learning: `scikit-learn`, `keras`, `tensorflow`
- SAGE
- Other: ___________?
## 5.1 Random
The `random` package implements pseudo-random number generators for various distributions.
```
import random
```
The documentation is available here: https://docs.python.org/3/library/random.html.
```
help(random)
```
Almost all module functions depend on the basic function `random()`, which generates a random float uniformly in the semi-open range `[0.0, 1.0)`. Python uses the Mersenne Twister as the core generator. It produces 53-bit precision floats and has a period of `2**19937-1`. The underlying implementation in C is both fast and threadsafe. The Mersenne Twister is one of the most extensively tested random number generators in existence. However, being completely deterministic, it is not suitable for all purposes, and is completely unsuitable for cryptographic purposes.
```
random.uniform(0,1)
```
For integers, there is uniform selection from a range. For sequences, there is uniform selection of a random element. Let's play a simple game.
```
# Guessing game: pick a random target in 1..10, then prompt until the user matches it.
number = random.choice(range(1,11))
choice = 0
while number != choice:
choice = int(input('Choose a number between 1 and 10 (inclusive): '))
# NOTE(review): indentation was flattened in this export - presumably the
# congratulation prints once after the loop exits; confirm against the notebook.
print('Congratulations, you have guessed the right number!')
```
If we used the following line, the number above would be equal to `3`:
```
random.seed(2) # initialize the random number generator
```
We can also use NumPy's random sampling package `numpy.random` (https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html):
```
import numpy as np
np.random.uniform(0,1)
# dir(np.random)
```
With this package, we could immediately create samples drawn from a specific distribution:
```
sample = np.random.normal(0,1,100000)
# sample
import matplotlib.pyplot as plt
plt.hist(sample, bins=50, density=True)
plt.show()
```
## 5.2 Scipy Statistics
This module contains a large number of probability distributions.
```
import scipy.stats
help(scipy.stats)
```
Let's plot some probability density functions of the Gaussian distribution:
```
from scipy.stats import norm
x = np.linspace(-5,5,num=200)
fig = plt.figure(figsize=(12,6))
for mu, s in zip([0.5, 0.5, 0.5], [0.2, 0.5, 0.8]):
plt.plot(x, norm.pdf(x,mu,s), lw=2,
label="$\mu={0:.1f}, s={1:.1f}$".format(mu, s))
plt.fill_between(x, norm.pdf(x, mu, s), alpha = .4)
plt.xlim([-5,5])
plt.legend(loc=0)
plt.ylabel("pdf at $x$")
plt.xlabel("$x$")
plt.show()
```
Let's create an interactive plot of the Gamma distribution:
```
%%capture
from ipywidgets import interactive
from scipy.stats import gamma
x = np.arange(0, 40, 0.005)
shape, scale = 5, 0.5
fig, ax = plt.subplots()
y = gamma.pdf(x, shape, scale=scale)
line = ax.plot(x, y)
ax.set_ylim((0,0.5))
def gamma_update(shape, scale):
# Slider callback: recompute the gamma pdf for the new parameters and
# redraw the existing line in place. Uses the cell-level closures
# `x` (fixed grid), `line` (plotted artists), and `fig`.
y = gamma.pdf(x, shape, scale=scale)
line[0].set_ydata(y)
fig.canvas.draw()
display(fig)
interactive(gamma_update, shape=(0.1, 10.0), scale=(0.3, 3.0))
```
## 5.3 Seaborn
Seaborn is a Python data visualization library based on `matplotlib`. It is the equivalent to `R`'s package `ggplot2` and provides a high-level interface for drawing attractive and informative statistical graphics.
```
import seaborn as sns
```
We will create some basic `seaborn` plots. A gallery is available here: http://seaborn.pydata.org/examples/index.html.
A scatterplot of a bivariate normal distribution:
```
import pandas as pd
mean, cov = [0, 1], [(1, .5), (.5, 1)]
data = np.random.multivariate_normal(mean, cov, 500)
df = pd.DataFrame(data, columns=["x", "y"])
sns.jointplot(x="x", y="y", data=df)
```
A scatterplot matrix:
```
df
df = sns.load_dataset("iris")
sns.pairplot(df, hue="species")
tips = sns.load_dataset("tips")
tips
```
A linear model plot:
```
sns.lmplot(x="total_bill", y="tip", data=tips, hue="smoker")
```
## 5.4 Statistical Models
Statsmodels is a Python package that allows users to explore data, estimate statistical models, and perform statistical tests. An extensive list of descriptive statistics, statistical tests, plotting functions, and result statistics are available for different types of data and each estimator. It complements SciPy's stats module.
```
import numpy as np
import statsmodels.api as sm
```
The user guide can be found here: https://www.statsmodels.org/stable/user-guide.html.
Let's explore our `iris` dataset again:
```
df
```
We would like to know whether the `sepal_length` depends on the explanatory variable `species`. Let's create a boxplot:
```
sns.boxplot(x="species", y="sepal_length", data=df)
```
It seems like this is indeed the case. However, we need to perform some statistical test to conclude this. Let's do some ANOVA (see syllabus Statistical Models, M. de Gunst):
```
lm = sm.OLS.from_formula('sepal_length ~ species', data=df)
fitted_model = lm.fit()
print(sm.stats.anova_lm(fitted_model))
```
We conclude that `species` is a significant explanatory variable for `sepal_length`. We can find the coefficients using the following code:
```
print(fitted_model.summary())
```
Now let's explore a dataset from `statsmodels`:
```
spector_data = sm.datasets.spector.load_pandas().data
spector_data
```
We will again do some ANOVA:
```
m = sm.OLS.from_formula('GRADE ~ GPA + TUCE', spector_data)
print(m.df_model, m.df_resid)
print(m.endog_names, m.exog_names)
res = m.fit()
# res.summary()
print(res.summary())
```
From this table, we conclude that `GPA` is a significant factor but `TUCE` is not. We can extract the coefficients of our fitted model as follows:
```
res.params # parameters
```
Given the values `GPA` and `TUCE`, we can get a predicted value for `GRADE`:
```
m.predict(res.params, [1, 4.0, 25])
```
We predict `GRADE = 1`.
We can also perform some _Fisher tests_ to check whether the explanatory variables are significant:
```
a = res.f_test("GPA = 0")
a.summary()
b = res.f_test("GPA = TUCE = 0")
b.summary()
```
Now let's take the full model:
```
spector_data
m = sm.OLS.from_formula('GRADE ~ GPA + TUCE + PSI', spector_data)
res1 = m.fit()
print(res1.summary())
```
As we can see, `PSI` is an important explanatory variable! We compare our models using the information criteria, or by performing some other tests:
```
res1.compare_f_test(res) # res1 better
res1.compare_lm_test(res)
res1.compare_lr_test(res)
help(sm)
```
We can also use a generalized linear model using the `sm.GLM` function or do some time series analysis using the `sm.tsa` subpackage. The investigation of this is left to the enthusiastic reader. An introduction video can be found here:
```
from IPython.display import YouTubeVideo
YouTubeVideo('o7Ux5jKEbcw', width=533, height=300)
```
## 5.5 Python vs. R
There’s a lot of recurrent discussion on the right tool to use for statistics and machine learning. `R` and `Python` are often considered alternatives: they are both good for statistics and machine learning tasks. But which one is the fastest? For a benchmark, it is relatively hard to make it fair: the speed of execution may well depend on the code, or the speed of the different libraries used. We decide to do classification on the Iris dataset. It is a relatively easy Machine Learning project, which seems to make for a fair comparison. We use the commonly used libraries in both `R` and `Python`. The following steps are executed:
1. Read a csv file with the iris data.
2. Randomly split the data in 80% training data and 20% test data.
3. Fit a number of models (logistic regression, linear discriminant analysis, k-nearest neighbors, and support vector machines) on the training data using built-in grid-search and cross-validation methods
4. Evaluate each of those best models on the test data and select the best model
We get the following results:
```
# %load resources/python_vs_R.py
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
def main():
# Benchmark workload: load iris, split 80/20, fit four classifiers
# (two of them with 5-fold grid search), then score each on the test set.
# NOTE(review): the CSV is re-downloaded over the network on every run, so
# the timing below includes network latency - confirm that is intended.
names = ["sepal_length", "sepal_width", "petal_length", "petal_width", "Name"]
iris_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", names = names)
train, test = train_test_split(iris_data, test_size=0.2)
X_train = train.drop('Name', axis=1)
y_train = train['Name']
X_test = test.drop('Name', axis=1)
y_test = test['Name']
# logistic regression
lr = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000)
lr.fit(X_train, y_train)
# linear discriminant analysis
lda = LinearDiscriminantAnalysis()
lda.fit(X_train,y_train)
# KNN (k-nearest neighbours), tuning k over 1..10 with 5-fold CV
parameters = {'n_neighbors': range(1,11)}
knn = GridSearchCV(KNeighborsClassifier(), parameters, scoring = 'accuracy', cv = KFold(n_splits=5))
knn.fit(X_train,y_train)
# SVM, tuning the regularisation constant C over 1..10 with 5-fold CV
parameters = {'C': range(1,11)}
svc = GridSearchCV(svm.SVC(kernel = 'linear'), parameters, scoring = 'accuracy', cv = KFold(n_splits=5))
svc.fit(X_train,y_train)
# evaluate
lr_test_acc = lr.score(X_test,y_test)
lda_test_acc = lda.score(X_test,y_test)
knn_test_acc = knn.best_estimator_.score(X_test,y_test)
svc_test_acc= svc.best_estimator_.score(X_test,y_test)
# print(lr_test_acc, lda_test_acc, knn_test_acc, svc_test_acc)
# Time five complete benchmark runs and print the total wall-clock duration.
from datetime import datetime as dt
now = dt.now()
for i in range(5):
main()
print(dt.now() - now)
```
It seems that the `Python` code runs a little bit faster. However, when we make the model more complex, or use multiprocessing, the difference is even higher! If speed matters, using `Python` is the best alternative.
### 🔴 *Next Week:*
```
np.random.choice(['Machine learning 2','Something else'], p=[0.99,0.01])
```
| github_jupyter |
**This notebook is an exercise in the [Intro to Deep Learning](https://www.kaggle.com/learn/intro-to-deep-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/deep-neural-networks).**
---
# Introduction #
In the tutorial, we saw how to build deep neural networks by stacking layers inside a `Sequential` model. By adding an *activation function* after the hidden layers, we gave the network the ability to learn more complex (non-linear) relationships in the data.
In these exercises, you'll build a neural network with several hidden layers and then explore some activation functions beyond ReLU. Run this next cell to set everything up!
```
import tensorflow as tf
# Setup plotting
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning_intro.ex2 import *
```
In the *Concrete* dataset, your task is to predict the compressive strength of concrete manufactured according to various recipes.
Run the next code cell without changes to load the dataset.
```
import pandas as pd
concrete = pd.read_csv('../input/dl-course-data/concrete.csv')
concrete.head()
```
# 1) Input Shape #
The target for this task is the column `'CompressiveStrength'`. The remaining columns are the features we'll use as inputs.
What would be the input shape for this dataset?
```
# YOUR CODE HERE
input_shape = [concrete.shape[1] - 1]
# Check your answer
q_1.check()
# Lines below will give you a hint or solution code
#q_1.hint()
#q_1.solution()
```
# 2) Define a Model with Hidden Layers #
Now create a model with three hidden layers, each having 512 units and the ReLU activation. Be sure to include an output layer of one unit and no activation, and also `input_shape` as an argument to the first layer.
```
from tensorflow import keras
from tensorflow.keras import layers
# YOUR CODE HERE
model = keras.Sequential([
layers.Dense(units=512, activation='relu', input_shape=input_shape),
layers.Dense(units=512, activation='relu'),
layers.Dense(units=512, activation='relu'),
layers.Dense(units=1)
])
# Check your answer
q_2.check()
model.weights[0]
for layer in model.weights:
print(layer.shape)
# Lines below will give you a hint or solution code
#q_2.hint()
#q_2.solution()
```
# 3) Activation Layers #
Let's explore activations functions some.
The usual way of attaching an activation function to a `Dense` layer is to include it as part of the definition with the `activation` argument. Sometimes though you'll want to put some other layer between the `Dense` layer and its activation function. (We'll see an example of this in Lesson 5 with *batch normalization*.) In this case, we can define the activation in its own `Activation` layer, like so:
```
layers.Dense(units=8),
layers.Activation('relu')
```
This is completely equivalent to the ordinary way: `layers.Dense(units=8, activation='relu')`.
Rewrite the following model so that each activation is in its own `Activation` layer.
```
### YOUR CODE HERE: rewrite this to use activation layers
model = keras.Sequential([
layers.Dense(32, input_shape=[8]),
layers.Activation('relu'),
layers.Dense(32),
layers.Activation('relu'),
layers.Dense(1),
])
# Check your answer
q_3.check()
# Lines below will give you a hint or solution code
#q_3.hint()
#q_3.solution()
```
# Optional: Alternatives to ReLU #
There is a whole family of variants of the `'relu'` activation -- `'elu'`, `'selu'`, and `'swish'`, among others -- all of which you can use in Keras. Sometimes one activation will perform better than another on a given task, so you could consider experimenting with activations as you develop a model. The ReLU activation tends to do well on most problems, so it's a good one to start with.
Let's look at the graphs of some of these. Change the activation from `'relu'` to one of the others named above. Then run the cell to see the graph. (Check out the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/activations) for more ideas.)
```
# YOUR CODE HERE: Change 'relu' to 'elu', 'selu', 'swish'... or something else
activation_layers = ['relu', 'elu', 'selu', 'swish', 'sigmoid', 'tanh']
for activation_layer in activation_layers:
x = tf.linspace(-3.0, 3.0, 100)
y = layers.Activation(activation_layer)(x)
plt.figure(dpi=100)
plt.title(activation_layer)
plt.plot(x, y)
plt.xlim(-3, 3)
plt.xlabel("Input")
plt.ylabel("Output")
plt.show()
```
# Keep Going #
Now move on to Lesson 3 and [**learn how to train neural networks**](https://www.kaggle.com/ryanholbrook/stochastic-gradient-descent) with stochastic gradient descent.
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/191966) to chat with other Learners.*
| github_jupyter |
```
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
data = pd.read_csv('~/kaggle/input/melbourne-housing-snapshot/melb_data.csv')
# Select subset of predictors
cols_to_use = ['Rooms', 'Distance', 'Landsize', 'BuildingArea', 'YearBuilt']
X = data[cols_to_use]
# Select target
y = data.Price
# Separate data into training and validation sets
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
# import XGBoost - extreme gradient boosting
from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(X_train, y_train)
from sklearn.metrics import mean_absolute_error
predictions = my_model.predict(X_valid)
mae = str(mean_absolute_error(y_valid, predictions))
print("Mean Absolute Error: "+ mae)
```
## Parameter Tuning
The various parameter that can be tuned to dramatically affect accuracy and training speed are
- `n_estimators`
- `early_stopping_rounds`
- `learning_rate`
- `n_jobs`
### n_estimators
specifies how many times to go through the modelling cycle, thus equal to the number of models included in the ensemble
```
# n_estimators
my_model1 = XGBRegressor(n_estimators=500)
my_model1.fit(X_train, y_train)
predictions = my_model1.predict(X_valid)
mae = str(mean_absolute_error(y_valid, predictions))
print("Mean Absolute Error: "+ mae)
```
### early_stopping_rounds
`early_stopping_rounds` offers a way to automatically find the ideal value for n_estimators. Early stopping causes the model to stop iterating when the validation score stops improving, even if we aren't at the hard stop for `n_estimators`.
Since random chance sometimes causes a single round where validation scores don't improve, you need to specify a number for how many rounds of straight deterioration to allow before stopping.
When using `early_stopping_rounds`, you also need to set aside some data for calculating the validation scores - this is done by setting the `eval_set` parameter.
```
# early_stopping_rounds
my_model2 = XGBRegressor(n_estimators=500)
my_model2.fit(X_train, y_train,
early_stopping_rounds=5,
eval_set=[(X_valid, y_valid)],
verbose=False)
predictions = my_model2.predict(X_valid)
mae = str(mean_absolute_error(y_valid, predictions))
print("Mean Absolute Error: "+ mae)
```
### learning_rate
Step size of gradient descent, multiplied with the predictions of each ensembled model before they are added (ensembled).
Allows us to set a higher value of `n_estimators` without overfitting.
```
# learning_rate
my_model3 = XGBRegressor(n_estimators=1000,
learning_rate=0.2)
my_model3.fit(X_train, y_train,
early_stopping_rounds=5,
eval_set=[(X_valid, y_valid)],
verbose=False)
predictions = my_model3.predict(X_valid)
mae = str(mean_absolute_error(y_valid, predictions))
print("Mean Absolute Error: "+ mae)
```
### n_jobs
Implements parallelism to reduce runtime while fitting and training model. Usually set to the number of cores and isn't particularly helpful on smaller models.
```
my_model4 = XGBRegressor(n_estimators=1000,
learning_rate=0.2, n_jobs=6)
my_model4.fit(X_train, y_train,
early_stopping_rounds=5,
eval_set=[(X_valid, y_valid)],
verbose=False)
predictions = my_model4.predict(X_valid)
mae = str(mean_absolute_error(y_valid, predictions))
print("Mean Absolute Error: "+ mae)
def get_score(n_estimators, model):
# NOTE(review): `n_estimators` is accepted but never used - the score depends
# only on `model`, so every call for a given model returns the same value.
# Confirm whether the intent was to refit with that many estimators.
# Relies on the notebook-level globals X_valid / y_valid.
predictions = model.predict(X_valid)
# MAE is returned as a string, matching how it is printed elsewhere.
mae = str(mean_absolute_error(y_valid, predictions))
return mae
models = [ my_model1, my_model2, my_model3, my_model4 ]
model_results = {}
for model in models:
results = {}
for i in range(1,11):
results[100*i] = get_score(100*i, model)
print(results)
model_results[model] = results
print('\n')
print(model_results)
```
| github_jupyter |
# Analysis for the floor control detection (FCD) model and competitor models
This notebook analyses the predictions of the FCD model and the competitor models discussed in the paper and shows how they compare over a few performance measurements. It also includes some stats about the dataset and the annotated floor properties, and an optimised FCD model for highest accuracy.
```
import itertools
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyjags
from scipy import optimize as soptimize
import predict_fcd
import utils.annotated_floor
import utils.iteration
import utils.mcmc_plot
import utils.path
%load_ext autoreload
%autoreload 2
plt.style.use('ggplot')
plt.rcParams.update({'axes.titlesize': 'large'})
np.random.seed(1234)
FEATURES_DIR = pathlib.Path('features')
PREDICTIONS_DIR = pathlib.Path('predictions')
ANALYSIS_SAMPLE_RATE = 10
SAMPLE_RATE = {
'fcd': 50,
'optimised_fcd': 50,
'lstm': 20,
'partial_lstm': 20,
'vad': 50,
'random': ANALYSIS_SAMPLE_RATE,
}
MODELS = list(SAMPLE_RATE.keys())
DEFAULT_FCD_PARAMS = (0.35, 0.1)
OPTIMISED_FCD_PARAMS = (1.78924915, 1.06722576) # Overriden by lengthy optimisation below
CHAINS = 4
ITERATIONS = 10_000
```
# Utilities
Utility functions and generator functions that are used throughout the code and use the constants declared above. More utilities are imported from the `util` package. These are considered more generic.
### General utilities
```
def array_to_series(x, name, sample_rate):
    '''
    Convert a numpy array sampled at `sample_rate` into a pandas Series
    downsampled to ANALYSIS_SAMPLE_RATE, indexed by time in seconds.
    '''
    step = sample_rate // ANALYSIS_SAMPLE_RATE
    downsampled = x[::step]
    timestamps = np.arange(len(downsampled)) / ANALYSIS_SAMPLE_RATE
    return pd.Series(downsampled, index=timestamps, name=name)
def utterances_to_floor(utterances_df):
    '''
    Build the annotated-floor timeseries from a dataframe of utterances
    (each row carries start_time, end_time, and participant).
    '''
    floor_values = list(
        utils.annotated_floor.gen(
            utterances_df,
            sample_rate=ANALYSIS_SAMPLE_RATE,
        )
    )
    return array_to_series(
        floor_values,
        name='floor',
        sample_rate=ANALYSIS_SAMPLE_RATE,
    )
```
### Random model utilities
```
def _generate_random_model_intervals(average_floor_duration):
floor_holder = np.random.randint(2)
previous_timestamp = 0
while True:
samples = np.random.exponential(average_floor_duration, 100)
timestamps = samples.cumsum() + previous_timestamp
for timestamp in timestamps:
yield {
'start_time': previous_timestamp,
'end_time': timestamp,
'participant': floor_holder,
}
floor_holder = (floor_holder * -1) + 1
previous_timestamp = timestamp
def calculate_random_model(average_floor_duration, part_duration):
    '''
    Build a random floor array whose turn durations are exponentially
    distributed with mean `average_floor_duration`, covering intervals
    that start before `part_duration`.
    '''
    intervals = _generate_random_model_intervals(average_floor_duration)
    bounded = itertools.takewhile(
        lambda interval: interval['start_time'] < part_duration,
        intervals,
    )
    values = utils.iteration.intervals_to_values_gen(
        bounded,
        sample_rate=ANALYSIS_SAMPLE_RATE,
        key='participant',
    )
    return list(values)
```
### Dataset stats utilities
```
def dataset_stats_gen():
'''
Calculate basic stats about the annotated floor.

Yields one record per session part (train + test splits) with:
- competition_for_floor: fraction of samples where the floor is
undefined (NaN marks competition for the floor or silence).
- average_floor_duration: mean floor-interval length in seconds.
- average_part_duration: timestamp of the last utterance end.
'''
for part in utils.path.session_parts_gen(train_set=True, test_set=True):
utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')
floor_intervals = list(utils.annotated_floor.utterances_to_floor_intervals_gen(utterances_df))
floor = utterances_to_floor(utterances_df)
yield {
'competition_for_floor': np.isnan(floor).mean(),
'average_floor_duration': np.mean([i['end_time'] - i['start_time'] for i in floor_intervals]),
'average_part_duration': utterances_df['end_time'].max(),
}
```
### Performance measurement generator functions
```
def accuracy(model, floor):
    '''
    Every 10 seconds, where the floor is defined (neither competition
    nor silence), yield 1 if the model agrees with the annotated floor
    and 0 otherwise. The 10-second jumps keep samples independent.
    '''
    step = 10 * ANALYSIS_SAMPLE_RATE
    paired = pd.concat([model, floor], axis=1)[::step].dropna()
    agreement = (paired.iloc[:, 0] == paired.iloc[:, 1]).astype(int)
    yield from agreement
def backchannels(model, utterances_df):
'''
For each backchannel yield 1 if the model report a floor
for the partner, 0 otherwise.
'''
# Local name shadows the function itself; harmless here but worth renaming.
backchannels = utterances_df[utterances_df['backchannel']]
for _, bc in backchannels.iterrows():
bc_timestamp = bc['start_time']
# First model sample at or after the backchannel's start time.
# NOTE(review): raises IndexError if a backchannel starts after the last
# model sample - confirm the data guarantees this cannot happen.
prediction_at_bc = model[bc_timestamp:].values[0]
# NOTE(review): this truthiness test skips a 0-valued prediction but lets
# NaN through (NaN is truthy in Python) - verify the floor encoding makes
# this the intended behaviour.
if prediction_at_bc:
yield int(prediction_at_bc != bc['participant'])
def _floor_holder_changes(array):
    '''Count how many times the floor holder changes in `array`.'''
    defined = array[~np.isnan(array)]
    deduped = list(utils.iteration.dedup(defined))
    # n distinct runs of values means n - 1 transitions between holders.
    return len(deduped) - 1
def stability(model, floor):
    '''
    Yield the ratio of annotated floor changes to predicted floor
    changes (1 means the model changes holders as often as reality).
    '''
    annotated = _floor_holder_changes(floor)
    predicted = _floor_holder_changes(model)
    yield annotated / predicted
def lag(model, floor):
'''
Yield positive lags in seconds.

For every floor change the model eventually agrees with, yields the
delay between the annotated change and the first matching model
prediction. Each annotated change is credited at most once.
'''
# Series of holder values keyed by the timestamp where each change occurs.
# NOTE(review): Series.iteritems() was removed in pandas 2.0 - this code
# assumes an older pandas (switch to .items() on upgrade).
model_change = pd.Series(dict(utils.iteration.dedup(model.dropna().iteritems(), key=lambda x: x[1])))
floor_change = pd.Series(dict(utils.iteration.dedup(floor.dropna().iteritems(), key=lambda x: x[1])))
visited_timestamps = set()
for timestamp, prediction in model_change.iteritems():
# Most recent annotated change at or before this model change.
previous_floors = floor_change[:timestamp]
if not previous_floors.empty:
current_floor_timestamp = previous_floors.index[-1]
current_floor_value = previous_floors.values[-1]
# Credit the lag only on agreement, and only once per annotated change.
if (current_floor_value == prediction and current_floor_timestamp not in visited_timestamps):
yield (timestamp - current_floor_timestamp)
visited_timestamps.add(current_floor_timestamp)
```
### Models' performance (stats) collection utilities
```
def _part_models_stats_gen(part, average_floor_duration):
'''
Yield one {part, model, measurement, value} record for every sample of
every performance measurement of every model, for one session part.
'''
utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')
floor = utterances_to_floor(utterances_df)
rms = np.load(FEATURES_DIR / 'FCD' / f'{part}.npy')
# Predictions per model: precomputed arrays loaded from disk, plus the FCD
# model re-run with optimised parameters and a fresh random baseline.
models = {
'fcd': np.load(PREDICTIONS_DIR / 'FCD' / f'{part}.npy'),
'optimised_fcd': list(predict_fcd.gen_from_rms(rms, *OPTIMISED_FCD_PARAMS)),
'lstm': np.load(PREDICTIONS_DIR / 'LSTM' / f'full-{part}.npy'),
'partial_lstm': np.load(PREDICTIONS_DIR / 'LSTM' / f'partial-{part}.npy'),
'vad': np.load(PREDICTIONS_DIR / 'VAD' / f'{part}.npy'),
'random': calculate_random_model(
average_floor_duration,
part_duration=floor.index[-1],
),
}
# Align all model predictions on the common analysis time index.
models_df = pd.concat(
[array_to_series(x, name=n, sample_rate=SAMPLE_RATE[n]) for n, x in models.items()],
axis=1,
)
# backchannels compares against the utterance table; the other
# measurements compare against the annotated floor series.
measurement_functions_and_args = {
backchannels: utterances_df,
**{f: floor for f in [accuracy, stability, lag]},
}
for model in models:
for f, arg in measurement_functions_and_args.items():
for value in f(models_df[model], arg):
yield {
'part': part,
'model': model,
'measurement': f.__name__,
'value': value,
}
def models_stats_gen(average_floor_duration):
    '''
    Calculate every performance measurement for every model across the
    test set, one record at a time.
    '''
    test_parts = utils.path.session_parts_gen(train_set=False, test_set=True)
    for part in test_parts:
        yield from _part_models_stats_gen(part, average_floor_duration)
```
### Bayesian analysis utilities
```
def gamma_template(mode, sd):
    '''
    Return a JAGS "shape, rate" argument string for a gamma distribution
    reparameterised by mode and standard deviation.
    '''
    rate_expr = f'({mode} + sqrt({mode} ^ 2 + 4 * {sd} ^ 2)) / (2 * {sd} ^ 2)'
    shape_expr = f'1 + {mode} * {rate_expr}'
    return ', '.join([shape_expr, rate_expr])
def beta_template(mode, k):
    '''
    Return a JAGS "a, b" argument string for a beta distribution
    reparameterised by mode and concentration.
    '''
    a_expr = f'{mode} * ({k} - 2) + 1'
    b_expr = f'(1 - {mode}) * ({k} - 2) + 1'
    return ', '.join([a_expr, b_expr])
def run_model(code, data):
'''
Create and sample a JAGS model.

Compiles `code` against the observed `data` using the notebook-level
CHAINS constant, then draws ITERATIONS samples of the 'mode' parameter
per chain.
'''
model = pyjags.Model(code=code, data=data, chains=CHAINS)
return model.sample(ITERATIONS, vars=['mode'])
def mode_comparison(trace, models, diag_xlim, comp_xlim):
    '''
    Plot pairwise comparisons of the sampled `mode` parameter for the
    named models, using their positions in the global MODELS order.
    '''
    indices = [MODELS.index(name) for name in models]
    utils.mcmc_plot.param_comparison(
        trace,
        'mode',
        comparison=indices,
        names=models,
        diag_xlim=diag_xlim,
        comp_xlim=comp_xlim,
    )
def compare_two(models, traces, xlim):
'''
For each measurement trace, plot the distribution of the difference
between the two given models' sampled `mode` parameters.
'''
_, axes = plt.subplots(ncols=len(traces), figsize=(8, 2))
for ax, (measurement, trace) in zip(axes, traces.items()):
# Positions of the two models in the global MODELS ordering.
m1, m2 = [MODELS.index(m) for m in models]
ax.set(title=measurement)
# Dashed zero line: no difference between the two models.
ax.axvline(0, linestyle='--', c='grey')
utils.mcmc_plot.dist(
trace['mode'][m1].reshape(-1) - trace['mode'][m2].reshape(-1),
histplot_kwargs={'binrange': xlim},
ax=ax,
)
def _hdi_as_dict(model, samples):
return {
'model': model,
'hdi_start': np.percentile(samples, 2.5),
'hdi_end': np.percentile(samples, 97.5),
}
def hdi_summary(models, trace):
    '''
    Yield an interval summary for each model, followed by summaries of
    the pairwise differences between the models' posterior samples.
    '''
    def flat_samples(name):
        # Flatten (chains, iterations) into one sample vector.
        return trace['mode'][MODELS.index(name)].reshape(-1)

    for name in models:
        yield _hdi_as_dict(name, flat_samples(name))
    for first, second in itertools.combinations(models, 2):
        diff = flat_samples(first) - flat_samples(second)
        yield _hdi_as_dict(f'{first} - {second}', diff)
```
# Analysis starts here!
## Dataset stats
```
dataset_stats_df = pd.DataFrame(dataset_stats_gen())
dataset_stats_df.describe()
# Keep the average floor duration for later, for the random model
average_floor_duration = dataset_stats_df['average_floor_duration'].mean()
```
## Optimising FCD parameters for accuracy
This is done on the train set.
```
optimisation_data = []
for part in utils.path.session_parts_gen(train_set=True, test_set=False):
utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')
floor = utterances_to_floor(utterances_df)
rms = np.load(FEATURES_DIR / 'FCD' / f'{part}.npy')
optimisation_data.append((rms, floor))
def get_negative_accuracy_from_model(params):
'''
Optimisation objective: mean FCD accuracy over the train-set parts in
the notebook-level `optimisation_data`, negated because scipy's
optimisers minimise.
'''
accuracies = []
for rms, floor in optimisation_data:
fcd_gen = predict_fcd.gen_from_rms(rms, *params)
fcd = array_to_series(list(fcd_gen), name='fcd', sample_rate=SAMPLE_RATE['fcd'])
accuracies.append(np.mean(list(accuracy(fcd, floor))))
return -np.mean(accuracies)
```
**Note!** This cell takes a while to run. It is commented out as the entire notebook can be executed without it. The default optimised parameters (declared at the top of the notebook) are used in that case.
```
# %%time
# res = soptimize.basinhopping(
# get_negative_accuracy_from_model,
# DEFAULT_FCD_PARAMS,
# seed=1234,
# )
# OPTIMISED_FCD_PARAMS = res.x
# res
```
**Example of the output of the cell above for reference**
```
CPU times: user 1h 7min 23s, sys: 24.2 s, total: 1h 7min 47s
Wall time: 1h 7min 40s
fun: -0.890908193538182
lowest_optimization_result: fun: -0.890908193538182
hess_inv: array([[1, 0],
[0, 1]])
jac: array([0., 0.])
message: 'Optimization terminated successfully.'
nfev: 3
nit: 0
njev: 1
status: 0
success: True
x: array([1.78924915, 1.06722576])
message: ['requested number of basinhopping iterations completed successfully']
minimization_failures: 0
nfev: 303
nit: 100
njev: 101
x: array([1.78924915, 1.06722576])
```
## The average of the models' performance on each measurement
```
models_stats_df = pd.DataFrame(models_stats_gen(average_floor_duration))
models_stats_df['model'] = pd.Categorical(
models_stats_df['model'],
categories=MODELS,
ordered=True,
)
for c in ['part', 'measurement']:
models_stats_df[c] = models_stats_df[c].astype('category')
(
models_stats_df
# Average within parts
.groupby(['model', 'measurement', 'part'])
.mean()
# Average accross parts
.reset_index()
.pivot_table(index='model', columns='measurement', values='value')
)
```
## Bayesian analysis of differences between the models
Here we estimate the mode of the accuracy, backchannels classification, stability, and lag, for each model. The Bayesian method provides a direct way to estimate the differences between the modes.
```
group_by_measurement = models_stats_df.groupby('measurement')
```
### Accuracy
```
hierarchical_beta_code = f"""
model {{
for (m in 1:n_models) {{
for (p in 1:n_parts) {{
correct[m, p] ~ dbin(part_mode[m, p], attempts[m, p])
part_mode[m, p] ~ dbeta({beta_template('mode[m]', 'concentration[m]')})
}}
mode[m] ~ dunif(0, 1)
concentration[m] = concentration_minus_two[m] + 2
concentration_minus_two[m] ~ dgamma({gamma_template(20, 20)})
}}
}}
"""
_df = group_by_measurement.get_group('accuracy')
accuracy_data = {
'n_parts': len(_df['part'].unique()),
'n_models': len(_df['model'].unique()),
'correct': _df.pivot_table(index='model', columns='part', values='value', aggfunc='sum'),
'attempts': _df.pivot_table(index='model', columns='part', values='value', aggfunc='count'),
}
accuracy_trace = run_model(code=hierarchical_beta_code, data=accuracy_data)
mode_comparison(accuracy_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1), comp_xlim=(-0.6, 0.6))
```
### Backchannels categorisation
```
_df = group_by_measurement.get_group('backchannels')
bc_data = {
'n_parts': len(_df['part'].unique()),
'n_models': len(_df['model'].unique()),
'correct': _df.pivot_table(index='model', columns='part', values='value', aggfunc='sum'),
'attempts': _df.pivot_table(index='model', columns='part', values='value', aggfunc='count'),
}
bc_trace = run_model(code=hierarchical_beta_code, data=bc_data)
mode_comparison(bc_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1), comp_xlim=(-0.6, 0.6))
```
### Stability
```
stability_code = f"""
model {{
for (m in 1:n_models) {{
for (p in 1:n_parts) {{
stability[m, p] ~ dgamma({gamma_template('mode[m]', 'sd[m]')})
}}
mode[m] ~ dgamma({gamma_template(1, 1)})
sd[m] ~ dgamma({gamma_template(1, 1)})
}}
}}
"""
_df = group_by_measurement.get_group('stability')
stability_data = {
'n_parts': len(_df['part'].unique()),
'n_models': len(_df['model'].unique()),
'stability': _df.pivot(index='model', columns='part', values='value'),
}
stability_trace = run_model(code=stability_code, data=stability_data)
mode_comparison(stability_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1.25), comp_xlim=(-1.2, 1.2))
```
### Lag
```
lag_code = f"""
model {{
for (i in 1:n_lags) {{
lag[i] ~ dexp(1 / part_mean[models[i], part[i]])
}}
for (i in 1:n_models) {{
for (j in 1:n_parts) {{
part_mean[i, j] ~ dgamma({gamma_template('mode[i]', 'sd[i]')})
}}
mode[i] ~ dgamma({gamma_template(0.5, 1)})
sd[i] ~ dgamma({gamma_template(1, 1)})
}}
}}
"""
_df = group_by_measurement.get_group('lag')
lag_data = {
'n_parts': len(_df['part'].unique()),
'n_models': len(_df['model'].unique()),
'n_lags': len(_df),
'lag': _df['value'],
'models': _df['model'].cat.codes + 1,
'part': _df['part'].cat.codes + 1,
}
lag_trace = run_model(code=lag_code, data=lag_data)
mode_comparison(lag_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 2.1), comp_xlim=(-2.2, 2.2))
```
### FCD with default params vs. optimised FCD
```
traces = {
'accuracy': accuracy_trace,
'backchannels': bc_trace,
'stability': stability_trace,
'lag': lag_trace,
}
compare_two(['fcd', 'optimised_fcd'], traces, xlim=(-0.75, 0.75))
```
### LSTM vs. partial-LSTM
```
compare_two(['lstm', 'partial_lstm'], traces, xlim=(-0.75, 0.75))
```
### Optimised FCD vs. LSTM
This is merely to see if the lag of the optimised FCD is better.
```
compare_two(['optimised_fcd', 'lstm'], traces, xlim=(-0.75, 0.75))
```
### HDIs summary
```
models = ['fcd', 'lstm', 'random']
comp_values = [0.5, 0.5, 1, average_floor_duration / 2]
fig, axes = plt.subplots(nrows=len(traces), figsize=(8, 8), sharex=True)
for ax, (measurement, trace), comp_value in zip(axes, traces.items(), comp_values):
yticks = {}
ax.axvline(0, linestyle='--', c='grey')
if comp_value:
ax.axvline(comp_value, linestyle='dotted', c='grey')
for i, row in enumerate(hdi_summary(models, trace)):
ax.plot((row['hdi_start'], row['hdi_end']), (-i, -i), linewidth=4, c='k')
for tail, alignment in zip(['hdi_start', 'hdi_end'], ['right', 'left']):
s = format(row[tail], '.2f').replace('-0', '-').lstrip('0')
ax.text(row[tail], -i + 0.1, s, horizontalalignment=alignment)
yticks[-i] = row['model']
ax.set(title=measurement)
ax.set_yticks(list(yticks.keys()))
ax.set_yticklabels(list(yticks.values()))
fig.tight_layout()
fig.savefig('graphics/hdis.svg')
```
| github_jupyter |
# 3D Object Detection Evaluation Tutorial
Welcome to the 3D object detection evaluation tutorial! We'll walk through the steps to submit your detections to the competition server.
```
from av2.evaluation.detection.eval import evaluate
from av2.evaluation.detection.utils import DetectionCfg
from pathlib import Path
from av2.utils.io import read_feather, read_all_annotations
```
### Constructing the evaluation configuration
The `DetectionCfg` class stores the configuration for the 3D object detection challenge.
- During evaluation, we remove _all_ cuboids that are not within the region-of-interest (ROI), which spatially is a 5 meter dilation of the drivable-area isocontour.
- **NOTE**: If you would like to _locally_ enable this behavior, you **must** pass in the directory to sensor dataset (to build the raster maps from the included vector maps).
```
# Root of the local AV2 sensor dataset; the evaluation needs it to build
# raster maps from the included vector maps for the ROI filter.
dataset_dir = Path.home().joinpath("data", "datasets", "av2", "sensor")
# Evaluation configuration with the competition-default parameters.
competition_cfg = DetectionCfg(dataset_dir=dataset_dir)
split = "val"
# All ground-truth annotations for the chosen split.
gts = read_all_annotations(split=split, dataset_dir=dataset_dir)
display(gts)
```
## Preparing detections for submission.
The evaluation expects the following 14 fields within a `pandas.DataFrame`:
- `tx_m`: x-component of the object translation in the egovehicle reference frame.
- `ty_m`: y-component of the object translation in the egovehicle reference frame.
- `tz_m`: z-component of the object translation in the egovehicle reference frame.
- `length_m`: Object extent along the x-axis in meters.
- `width_m`: Object extent along the y-axis in meters.
- `height_m`: Object extent along the z-axis in meters.
- `qw`: Real quaternion coefficient.
- `qx`: First quaternion coefficient.
- `qy`: Second quaternion coefficient.
- `qz`: Third quaternion coefficient.
- `score`: Object confidence.
- `log_id`: Log id associated with the detection.
- `timestamp_ns`: Timestamp associated with the detection.
- `category`: Object category.
Additional details can be found in [SUBMISSION_FORMAT.md](../src/av2/evaluation/detection/SUBMISSION_FORMAT.md).
```
# If you've already aggregated your detections into one file.
dts_path = Path("detections.feather")
dts = read_feather(dts_path)
# Score the detections against the ground truth under the competition config;
# returns the (possibly filtered) detections, ground truth, and metrics table.
dts, gts, metrics = evaluate(dts, gts, cfg=competition_cfg)  # Evaluate instances.
display(metrics)
```
Finally, if you would like to submit to the evaluation server, you just need to export your detections into a `.feather` file. This can be done by:
```python
dts.to_feather("detections.feather")
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.