code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
import time
import pandas as pd
import requests
import json5
import matplotlib.pyplot as plt
```
# Loading national data
```
# National BLS employment projections, ranked by projected 2030 employment (largest first).
df_nat = pd.read_csv("../Data/Employment_Projections.csv").sort_values('Employment 2030',ascending=False)
```
# Loading CA data
```
# California long-term occupational projections, ranked by projected employment.
df_CA = pd.read_csv("../Data/CA_Long_Term_Occupational_Employment_Projections.csv").sort_values('Projected Year Employment Estimate',ascending=False)
# Split out the Sacramento MSA and statewide rows; .copy() avoids
# SettingWithCopyWarning when new columns are added later.
df_Sac = df_CA[df_CA['Area Name (County Names)']=='Sacramento--Roseville--Arden-Arcade MSA (El Dorado, Placer, Sacramento, and Yolo Counties)'].copy()
df_Cal = df_CA[df_CA['Area Name (County Names)']=='California'].copy()
```
Filtering for occupations that pay $40k a year or more, and cleaning the occupational code in the national table so it matches the California tables.
```
# Sacramento occupations paying at least $40k/year.
df_Sac_40k = df_Sac[df_Sac['Median Annual Wage']>=40000].copy()
# Normalize the national SOC code to the XX-XXXX form used by the CA tables.
df_nat['Occupation Code']=df_nat['Occupation Code'].str.extract(r'([0-9]{2}-[0-9]{4})')
```
need to bin education levels
```
df_Sac_40k['Entry Level Education'].value_counts()
# Collapse detailed entry-level education requirements into three coarse bins.
# NOTE(review): categories not listed here (e.g. Master's or Doctoral degrees)
# pass through .replace() unchanged, so those rows fall into none of the
# '<HS'/'HS+'/'Associates+' bins filtered on below -- confirm this is intended.
education_levels = {'No formal educational credential':'<HS',
'High school diploma or equivalent':'HS+',
"Bachelor's degree":'Associates+',
"Associate's degree":'Associates+',
'Postsecondary non-degree award':'HS+',
'Some college, no degree':'HS+'
}
df_Sac['Education bin_a'] = df_Sac['Entry Level Education'].replace(to_replace=education_levels)
df_Sac_40k['Education bin_a'] = df_Sac_40k['Entry Level Education'].replace(to_replace=education_levels)
df_Cal['Education bin_a'] = df_Cal['Entry Level Education'].replace(to_replace=education_levels)
```
Less than HS
```
# Top Sacramento occupations (by projected employment) requiring less than a
# high school diploma -- all wages, then the >= $40k subset.
less_hs = df_Sac[df_Sac['Education bin_a']=='<HS'].sort_values(by='Projected Year Employment Estimate',ascending=False)
less_hs.head().transpose()
df_Sac_40k[df_Sac_40k['Education bin_a']=='<HS'].sort_values(by='Projected Year Employment Estimate',ascending=False).head().transpose()
```
HS or some college
```
# Top Sacramento occupations requiring a high school diploma or some college --
# all wages, then the >= $40k subset.
hs_plus = df_Sac[df_Sac['Education bin_a']=='HS+'].sort_values(by='Projected Year Employment Estimate',ascending=False)
hs_plus.head().transpose()
df_Sac_40k[df_Sac_40k['Education bin_a']=='HS+'].sort_values(by='Projected Year Employment Estimate',ascending=False).head().transpose()
```
Associates plus
```
# Top Sacramento occupations requiring an associate's or bachelor's degree --
# all wages, then the >= $40k subset.
sac_degree = df_Sac[df_Sac['Education bin_a']=='Associates+'].sort_values(by='Projected Year Employment Estimate',ascending=False)
sac_degree.head().transpose()
df_Sac_40k[df_Sac_40k['Education bin_a']=='Associates+'].sort_values(by='Projected Year Employment Estimate',ascending=False).head().transpose()
```
Looking at bar charts of training needed and histograms of Median Annual Wage
```
# Side-by-side wage histograms for the three education bins; zero/missing
# wages are excluded so placeholder values don't distort the distributions.
fig, axs = plt.subplots(1, 3, figsize=(12, 6))
for ax, (frame, color, label) in zip(
        axs,
        [(less_hs, 'g', '<HS'), (hs_plus, 'c', 'HS+'), (sac_degree, 'm', 'Associates+')]):
    ax.hist(frame[frame['Median Annual Wage'] > 0]['Median Annual Wage'], color=color)
    ax.set_title(label)
# Fix: plt.title() only titled the last subplot; use a figure-level title.
fig.suptitle('Distribution of Median Annual Salaries')
```
Ok, that is ugly
```
def _plot_training_frequencies(frame, title, color='rosybrown'):
    """Draw a horizontal bar chart of normalized 'Job Training' frequencies.

    NaN training values are kept as their own category (dropna=False).
    Returns the frequency table so callers can keep it for inspection.
    """
    counts = pd.DataFrame(frame['Job Training'].value_counts(
        normalize=True, sort=True, ascending=True, dropna=False))
    counts['training needed'] = counts.index
    # value_counts names its column 'Job Training' on older pandas and
    # 'proportion' on pandas >= 2.0; rename positionally so both work.
    counts.rename(columns={counts.columns[0]: 'frequency'}, inplace=True)
    plt.figure(figsize=(8, 4))
    plt.barh(y='training needed', width='frequency', data=counts, color=color)
    plt.title(title)
    return counts

# The original repeated this table/plot code three times verbatim.
less_hs_counts = _plot_training_frequencies(
    less_hs, 'Frequencies of training needed for occupations not requiring a high school diploma')
hs_counts = _plot_training_frequencies(
    hs_plus, 'Frequencies of training needed for occupations requiring a high school diploma')
college_counts = _plot_training_frequencies(
    sac_degree, "Frequencies of training needed for occupations requiring an associates or bachelor's degree")
```
| github_jupyter |
# Training a multi-linear classifier
*In this assignment I had to train and test a one layer network with multiple outputs to classify images from the CIFAR-10 dataset. I trained the network using mini-batch gradient descent applied to a cost function that computes cross-entropy loss of the classifier applied to the labelled training data and an L2 regularization term on the weight matrix.*
```
#@title Installers
#installers if needed
#!pip install -U -q PyDrive
# !pip uninstall scipy
# !pip install scipy==1.2.0
# !pip install texttable
#@title Import libraries
#Import CIFAR-10 data from my google drive folder; I downloaded and unzipped the CIFAR-10 files and uploaded them to my drive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
import pandas
import numpy
from texttable import Texttable
from sklearn.preprocessing import StandardScaler
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
from PIL import Image
import pickle
import numpy as np
from googleapiclient.discovery import build
drive_service = build('drive', 'v3')
import io
from googleapiclient.http import MediaIoBaseDownload
import matplotlib.pyplot as plt
from scipy import misc #remove, using PIL instead
#@title Functions: Decoding and displaying images
def unpickle(file):
    """Deserialize a pickled CIFAR-10 batch from an open binary file-like object.

    Keys in the returned dict are bytes (e.g. b'data', b'labels').
    """
    # encoding='bytes' is required for pickles written under Python 2.
    # (Original bound the result to a local named `dict`, shadowing the builtin.)
    return pickle.load(file, encoding='bytes')
def unpickle_getFromDrive(file_id):
    """Download the Drive file with the given id and unpickle it from memory."""
    filename = GetFromDrive(file_id)
    dict = pickle.load(filename, encoding='bytes')
    return dict
def loadLabels(file_id):
    """Return the CIFAR-10 class names as ASCII strings.

    Bug fix: the original ignored its ``file_id`` parameter and read the
    module-level ``label_file`` global instead; it now uses the argument.
    """
    data = unpickle_getFromDrive(file_id)
    return [x.decode('ascii') for x in data[b'label_names']]
def LoadBatch(file_id):
    """Load one CIFAR-10 batch from Google Drive.

    Returns:
        X: pixel data scaled to [0, 1].
        Y: (N, K) one-hot label matrix of ints.
        y: the raw integer labels as loaded from the batch.
    """
    dataset = unpickle(GetFromDrive(file_id))
    X = dataset[b'data'] / 255
    y = dataset[b'labels']
    # Vectorized one-hot encoding; the original built Y with a nested Python
    # list comprehension, which is O(N*K) interpreter work for the same result.
    Y = np.eye(np.max(y) + 1, dtype=int)[y]
    return X, Y, y
def GetFromDrive(file_id):
    """Download a Drive file by id into an in-memory BytesIO buffer,
    rewound to the start so callers can read it immediately."""
    request = drive_service.files().get_media(fileId=file_id)
    downloaded = io.BytesIO()
    downloader = MediaIoBaseDownload(downloaded, request)
    done = False
    while done is False:
        # next_chunk() returns (status, done); only the done flag is needed.
        _, done = downloader.next_chunk()
    downloaded.seek(0)
    return downloaded
def plot(tr_loss, val_loss, tr_accuracy, val_accuracy):
    """Plot per-epoch cost (left panel) and accuracy (right panel) curves
    for the training and validation sets."""
    panels = (
        (1, tr_loss, 'training loss', val_loss, 'validation loss',
         'Cost function', 'cost'),
        (2, tr_accuracy, 'training data', val_accuracy, 'validation data',
         'Accuracy', 'accuracy'),
    )
    for position, train_series, train_lbl, val_series, val_lbl, title, ylabel in panels:
        plt.subplot(1, 2, position)
        plt.plot(train_series, 'g-', label=train_lbl)
        plt.plot(val_series, 'r-', label=val_lbl)
        plt.title(title)
        plt.xlabel('epoch')
        plt.ylabel(ylabel)
        plt.legend()
    plt.show()
def image(img, label=''):
    """Display one CIFAR image given as a flat 3072-value vector
    (column-major RGB planes), with an optional title."""
    square = np.reshape(img, (32, 32, 3), order='F')
    # Rotate three quarter-turns to undo the orientation of the 'F'-order reshape.
    plt.imshow(np.rot90(square, k=3), interpolation='gaussian')
    plt.axis('off')
    plt.title(label)
def showImageFromWeightsWithLabels(W, labels):
    """Render each class's weight row as a 32x32 image in a 2x5 grid,
    titled with the class label."""
    for i, row in enumerate(W):
        # Min-max scale the row into [0, 1] so it renders as an image.
        # NOTE(review): divides by (max - min); a constant row would divide
        # by zero -- not expected for trained weights, but unguarded.
        img = (row - row.min()) / (row.max() - row.min())
        plt.subplot(2, 5, i+1)
        image(img, label=labels[i])
    plt.show()
```
EXERCISE 1. PART 1.
*Read in and store the training, validation and test data*
```
#@title Code: Load training-, validation- and test- data
# Strings are my file ids from my Drive
# (you need to exchange these with your own ids)
data_batch_1 = '1'
data_batch_2 = '2'
data_batch_3 = '3'
data_batch_4 = '4'
data_batch_5 = '5'
test_batch = '6'
label_file = '7'
# Read in and store the training, validation and test data
labels = loadLabels(label_file)
X_train, Y_train, y_train = LoadBatch(data_batch_1)
X_val, Y_val, y_val = LoadBatch(data_batch_2)
X_test, Y_test, y_test = LoadBatch(test_batch)
# Sanity check: display one training image.
image(X_train[1])
```
EXERCISE 1. PART 2.
*Transform training data to have zero mean*
```
#@title Functions: Normalize data
def getNormalized(X):
    """Center X feature-wise; return (centered X, the per-feature means)."""
    feature_means = np.mean(X, axis=0)
    return (X - feature_means, feature_means)
#@title Code: Normalize data
# Center every feature using the TRAINING means only, then apply the same
# shift to the validation and test sets (no leakage of their statistics).
X_train, normalMeans = getNormalized(X_train)
X_test -= normalMeans
X_val -= normalMeans
image(X_train[1])
print("X_train mean: " + str(np.mean(X_train)))
print("X_val mean: " + str(np.mean(X_val)))
print("X_test mean: " + str(np.mean(X_test)))
```
EXERCISE 1. PART 3.
*Initialize parameters of the model W and b with entry to have Gaussian random values (incl. zero mean and standard deviation of .01)*
```
# Gaussian parameter initialization with mean 0 and std 0.01 (per the assignment).
mean = 0.0
s = 0.01
d = X_train.shape[1]  # input dimensionality
K = Y_train.shape[1]  # number of classes
W = np.random.normal(mean, s, (K, d)) # Weight matrix; Normal (Gaussian) distribution
b = np.random.normal(mean, s, (K, 1)) # Bias vector; Normal (Gaussian) distribution
```
EXERCISE 1. PART 4.
*Function that evaluates the network function*
```
#@title Functions: EvaluateClassifier and Softmax
# Each CIFAR batch is a dict with a 10000x3072 data matrix (32x32x3 RGB
# images flattened) and 10000 labels in the range 0-9, i.e. 10 classes.
def EvaluateClassifier(X, W, b):
    """Return class-probability matrix P = softmax(W X^T + b) with one
    column of probabilities per sample."""
    P = softmax(np.dot(W, X.T) + b)
    return P
def softmax(s):
    """Column-wise softmax of a score matrix s (classes along axis 0).

    Fix: shifts scores by the per-column max before exponentiating so large
    scores do not overflow to inf/NaN; the shift cancels mathematically and
    leaves the result unchanged for well-scaled inputs.
    """
    shifted = np.exp(s - np.max(s, axis=0, keepdims=True))
    return shifted / np.sum(shifted, axis=0)
P = EvaluateClassifier(X_train[:100], W, b) #Check subset
np.sum(P, axis = 0) # Check if the sums for each sample sum up to 1
```
EXERCISE 1. PART 5.
*Function that computes the cost function*
```
#@title Functions: Compute Cost and Cross Entropy Loss
def CrossEntropyLoss(X, Y, W, b):
    """Per-sample cross-entropy -log p_y, where Y is the one-hot label matrix."""
    # Select the predicted probability of the correct class for each sample.
    log_X = np.multiply(Y.T , EvaluateClassifier(X,W,b)).sum(axis=0)
    # Guard against log(0) when the classifier assigns zero probability.
    log_X[log_X == 0] = np.finfo(float).eps
    return -np.log(log_X)
def ComputeCost(X, Y, W, b, lamda, scale_const = 1e+6):
    """Mean cross-entropy over the samples plus L2 penalty lamda * sum(W^2).

    NOTE(review): scale_const multiplies inside and divides outside each
    term -- mathematically a no-op, presumably intended to reduce
    floating-point error in the reductions; confirm it is still wanted.
    """
    return np.mean(scale_const * CrossEntropyLoss(X, Y, W, b)) / scale_const \
    + lamda * np.sum(scale_const * np.power(W, 2)) / scale_const
J = ComputeCost(X_train, Y_train, W, b, lamda = 0)
print("Loss from Cost Function: " + str(J))
```
EXERCISE 1. PART 6.
*Function that computes the accuracy*
```
#@title Functions: Compute Accuracy
def ComputeAccuracy(X, y, W, b):
    """Fraction of samples whose argmax class prediction equals the true label y."""
    predicted_classes = EvaluateClassifier(X, W, b).argmax(axis=0)
    return (predicted_classes == y).mean()
acc = ComputeAccuracy(X_train, y_train, W, b)
print("Check accuracy: " + str(acc))
```
EXERCISE 1. PART 7.
*Function that evaluates, for a mini-batch, the gradients, of the cost function w.r.t. W and b*
```
#@title Functions: Compute gradients and display differences between methods
# Check Check analytic gradient computations against numerical estimations of the gradients!
class FFNet(): #Feed Forward Neural Network, Single Layer
    """Single-layer classifier holding weights W (K x d) and biases b (K x 1),
    with three ways to compute cost gradients: forward differences, centered
    differences, and the analytical expression."""
    def __init__(self, d, K, mean, s):
        # Gaussian-initialized parameters, as required by the assignment.
        self.d = d
        self.K = K
        self.W = np.random.normal(mean, s, (K, d))
        self.b = np.random.normal(mean, s, (K, 1))
    def computeGradsNum(self, X, Y, lamda, h = 1e-8): #finite difference method = Faster but less accurate calculation of the gradients
        """Numerical gradients of ComputeCost w.r.t. W and b via forward
        differences: (cost(param + h) - cost(param)) / h."""
        # return (grad_W, grad_b)
        # NOTE(review): P is computed but never used in this method.
        P = EvaluateClassifier(X, self.W, self.b)
        """ Converted from matlab code """
        no = self.W.shape[0]
        d = X.shape[0]
        grad_W = np.zeros(self.W.shape);
        grad_b = np.zeros((no, 1));
        # Baseline cost, perturbed one parameter at a time below.
        c = ComputeCost(X, Y, self.W, self.b, lamda);
        for i in range(len(self.b)):
            b_try = np.array(self.b)
            b_try[i] += h
            c2 = ComputeCost(X, Y, self.W, b_try, lamda)
            grad_b[i] = (c2-c) / h
        for i in range(self.W.shape[0]):
            for j in range(self.W.shape[1]):
                W_try = np.array(self.W)
                W_try[i,j] += h
                c2 = ComputeCost(X, Y, W_try, self.b, lamda)
                grad_W[i,j] = (c2-c) / h
        return [grad_W, grad_b]
    def computeGradsNumSlow(self, X, Y, lamda, h = 1e-8): #Centered difference formula = More exact calculation of the gradients but slower
        """Numerical gradients via the centered difference formula
        (cost(param + h) - cost(param - h)) / (2h); O(h^2) accurate."""
        """ Converted from matlab code """
        no = self.W.shape[0]
        d = X.shape[0]
        grad_W = np.zeros(self.W.shape);
        grad_b = np.zeros((no, 1));
        for i in range(len(self.b)):
            b_try = np.array(self.b)
            b_try[i] -= h
            c1 = ComputeCost(X, Y, self.W, b_try, lamda)
            b_try = np.array(self.b)
            b_try[i] += h
            c2 = ComputeCost(X, Y, self.W, b_try, lamda)
            grad_b[i] = (c2-c1) / (2*h)
        for i in range(self.W.shape[0]):
            for j in range(self.W.shape[1]):
                W_try = np.array(self.W)
                W_try[i,j] -= h
                c1 = ComputeCost(X, Y, W_try, self.b, lamda)
                W_try = np.array(self.W)
                W_try[i,j] += h
                c2 = ComputeCost(X, Y, W_try, self.b, lamda)
                grad_W[i,j] = (c2-c1) / (2*h)
        return [grad_W, grad_b]
    def computeAnalyticalGradients(self, X, Y, lamda): #Analytical computation of the gradient
        """Exact gradients of the regularized cross-entropy cost, accumulated
        one sample at a time using g = -(y - p)."""
        # NOTE(review): P is computed here but the loop re-evaluates per sample.
        P = EvaluateClassifier(X, self.W, self.b)
        grad_W = np.zeros(self.W.shape)
        grad_b = np.zeros(self.b.shape)
        for i in range(X.shape[0]):
            x = X[i].reshape(1,-1)
            g = -(Y[i].reshape(-1,1) - EvaluateClassifier(x, self.W, self.b))
            grad_b += g
            grad_W += g.dot(x)
        # Average over the batch, then add the L2 regularization term for W.
        grad_W /= X.shape[0]
        grad_W += self.W * 2 * lamda
        grad_b /= X.shape[0]
        return (grad_W, grad_b)
def relErr(grad1, grad2):
    """Element-wise relative difference between two gradients, scaled by 1e4
    so tiny discrepancies show up in the comparison tables."""
    denominator = np.abs(grad1) + np.abs(grad2)
    return np.abs(grad1 - grad2) / denominator * 100 * 100
def absErr(grad1, grad2):
    """Element-wise absolute difference between two gradients, scaled by 1e6
    for display in the comparison tables."""
    return np.abs(grad1 - grad2) * 100 * 100 * 100
def compareGradients(lamda, title):
    """Print a table of relative and absolute differences between analytical
    (ANL), forward-difference (FDM) and centered-difference (CDM) gradients,
    computed on a 100-sample subset of the training data."""
    samples = 100
    FFnet = FFNet(d, K, mean, s)
    grad_W1, grad_b1 = FFnet.computeAnalyticalGradients(X_train[:samples, :d], Y_train[:samples], lamda)
    grad_W2, grad_b2 = FFnet.computeGradsNum(X_train[:samples, :d], Y_train[:samples], lamda)
    grad_W3, grad_b3 = FFnet.computeGradsNumSlow(X_train[:samples, :d], Y_train[:samples], lamda)
    err = Texttable()
    err_data = []
    # Compare accurate numerical method with analytical estimation of gradient
    err_data.append(['Gradient', 'Method', 'Rel Diff Min [e+04]', 'Rel Diff Max [e+04]', 'Rel Diff Mean [e+04]', 'Abs Diff Max [e+06]', 'Abs Diff Mean [e+06]'])
    # Pairwise comparisons: analytical vs each numerical method, and the two
    # numerical methods against each other, for both W and b.
    cdm_err_W = relErr(grad_W1, grad_W3)
    cdm_err_b = relErr(grad_b1, grad_b3)
    cdm_err_W_abs = absErr(grad_W1, grad_W3)
    cdm_err_b_abs = absErr(grad_b1, grad_b3)
    fdm_err_W = relErr(grad_W1, grad_W2)
    fdm_err_b = relErr(grad_b1, grad_b2)
    fdm_err_W_abs = absErr(grad_W1, grad_W2)
    fdm_err_b_abs = absErr(grad_b1, grad_b2)
    cdm_fdm_err_W= relErr(grad_W2, grad_W3)
    cdm_fdm_err_b= relErr(grad_b2, grad_b3)
    cdm_fdm_err_W_abs = absErr(grad_W2, grad_W3)
    cdm_fdm_err_b_abs = absErr(grad_b2, grad_b3)
    err_data.append(["W", "ANL vs CDM", str(np.min(cdm_err_W)),str(np.max(cdm_err_W)),str(np.mean(cdm_err_W)),str(np.max(cdm_err_W_abs)),str(np.mean(cdm_err_W_abs))])
    err_data.append(["W", "ANL vs FDM", str(np.min(fdm_err_W)),str(np.max(fdm_err_W)),str(np.mean(fdm_err_W)),str(np.max(fdm_err_W_abs)),str(np.mean(fdm_err_W_abs))])
    err_data.append(["W", "CDM vs FDM", str(np.min(cdm_fdm_err_W)),str(np.max(cdm_fdm_err_W)),str(np.mean(cdm_fdm_err_W)),str(np.max(cdm_fdm_err_W_abs)),str(np.mean(cdm_fdm_err_W_abs))])
    err_data.append(["b", "ANL vs CDM", str(np.min(cdm_err_b)),str(np.max(cdm_err_b)),str(np.mean(cdm_err_b)),str(np.max(cdm_err_b_abs)),str(np.mean(cdm_err_b_abs))])
    err_data.append(["b", "ANL vs FDM", str(np.min(fdm_err_b)),str(np.max(fdm_err_b)),str(np.mean(fdm_err_b)),str(np.max(fdm_err_b_abs)),str(np.mean(fdm_err_b_abs))])
    err_data.append(["b", "CDM vs FDM", str(np.min(cdm_fdm_err_b)),str(np.max(cdm_fdm_err_b)),str(np.mean(cdm_fdm_err_b)),str(np.max(cdm_fdm_err_b_abs)),str(np.mean(cdm_fdm_err_b_abs))])
    err.add_rows(err_data)
    print(title)
    print(err.draw())
```
Analytical (ANL) gradient computation is in the following result compared to the slow but accurate version based on the centered difference equation (CDM) and compared to the faster but less accurate finite difference method (FDM). The accuracy can be observed in the tables below, which display relative and absolute differences between the aforementioned methods. Note that absolute differences are less than 1e-6 and thereby considered to have produced the same result.
```
# Compare analytical vs numerical gradients with and without L2 regularization.
compareGradients(lamda=0.0, title="Without Regularization i.e. Lambda = 0.0")
compareGradients(lamda=1.0, title="With Regularization i.e. Lambda = 1.0")
```
EXERCISE 1. PART 8.
*Function that performs the mini-batch gradient descent algorithm to learn the network's parameters*
As the below result shows, after the first epoch the cost score decreases and the accuracy increases for each epoch.
Learning rate: We can also tell from the same result, that when the learning rate (eta) is too large, the training of the model becomes unstable. This can be observed in the first figure where eta=0.1
Regularization: With regularization, the gap between training-data and validation-data accuracy is narrower than without it. However, without regularization the accuracy is higher. Ideally the gap should not be too wide, as a wide gap can indicate overfitting on the training data.
```
#@title Function: Mini-batch gradient descent
class FFNet_mbGD(FFNet):
    """FFNet trained with plain mini-batch gradient descent."""
    def miniBatchGD(self, X, Y, n_batch, eta, n_epochs , lamda, X_val = None, Y_val = None):
        """Train for n_epochs and return four histories
        (train cost, val cost, train accuracy, val accuracy), each with one
        pre-training baseline entry followed by one entry per epoch."""
        results = ([],[],[],[])
        miniBatchNo = X.shape[0] // n_batch
        # Record the untrained baseline before the first update.
        results[0].append(ComputeCost(X, Y,self.W, self.b, lamda))
        results[1].append(ComputeCost(X_val, Y_val,self.W, self.b, lamda))
        results[2].append(ComputeAccuracy(X, np.argmax(Y.T, axis = 0),self.W, self.b))
        results[3].append(ComputeAccuracy(X_val, np.argmax(Y_val.T, axis = 0),self.W, self.b))
        for i in range(n_epochs):
            for j in range(miniBatchNo):
                # The final batch absorbs any remainder samples.
                if(j >= miniBatchNo - 1):
                    Xbatch = X[j * n_batch:]
                    Ybatch = Y[j * n_batch:]
                else:
                    j_start = j * n_batch
                    j_end = j_start + n_batch
                    Xbatch = X[j_start:j_end]
                    Ybatch = Y[j_start:j_end]
                grad_W, grad_b = self.computeAnalyticalGradients(Xbatch, Ybatch,lamda)
                # Vanilla gradient step with learning rate eta.
                self.W -= eta * grad_W
                self.b -= eta * grad_b
            results[0].append(ComputeCost(X, Y, self.W, self.b, lamda))
            results[1].append(ComputeCost(X_val, Y_val,self.W, self.b, lamda))
            results[2].append(ComputeAccuracy(X, np.argmax(Y.T, axis = 0),self.W, self.b))
            results[3].append(ComputeAccuracy(X_val, np.argmax(Y_val.T, axis = 0),self.W, self.b))
        return results
#@title Code: Run mini-batch gradient descent with difference parameters
# Train for the following parameters
lambdas = [0, 0, .1, 1]
etas = [.1, .001, .001, .001]
n_batch = 100
n_epochs = 40
np.random.seed(400) #400 specified in the assignment
t = Texttable()
data = []
data.append(['Parameters', 'Train Accuracy', 'Val Accuracy', 'Test Accuracy'])
for x in range(0, len(lambdas)):
    # Fresh network per hyper-parameter setting so runs don't share state.
    nm = FFNet_mbGD(d = X_train.shape[1], K = Y_train.shape[1], mean = 0.0, s = 0.01)
    tr_loss, val_loss, tr_accuracy, val_accuracy = nm.miniBatchGD(
        X_train, Y_train,
        n_batch, etas[x], n_epochs, lambdas[x],
        X_val = X_val, Y_val = Y_val)
    saveFortbl = "lambda="+str(lambdas[x])+", n epochs="+str(n_epochs)+", n batch="+str(n_batch)+", eta="+str(etas[x])+""
    print("****************************************")
    print("lambda="+str(lambdas[x])+", n epochs="+str(n_epochs)+", n batch="+str(n_batch)+", eta="+str(etas[x])+"")
    print("****************************************")
    # Final-epoch accuracies plus test accuracy, collected for the summary table.
    data.append([saveFortbl,str(tr_accuracy[-1]), str(val_accuracy[-1]),str(ComputeAccuracy(X_test, y_test, nm.W, nm.b))])
    plot(tr_loss, val_loss, tr_accuracy, val_accuracy)
    showImageFromWeightsWithLabels(nm.W, labels)
t.add_rows(data)
print(t.draw())
print(" ")
```
| github_jupyter |
```
"""
Overriding descriptor (a.k.a. data descriptor or enforced descriptor):
# BEGIN DESCR_KINDS_DEMO1
>>> obj = Managed() # <1>
>>> obj.over # <2>
-> Overriding.__get__(<Overriding object>, <Managed object>, <class Managed>)
>>> Managed.over # <3>
-> Overriding.__get__(<Overriding object>, None, <class Managed>)
>>> obj.over = 7 # <4>
-> Overriding.__set__(<Overriding object>, <Managed object>, 7)
>>> obj.over # <5>
-> Overriding.__get__(<Overriding object>, <Managed object>, <class Managed>)
>>> obj.__dict__['over'] = 8 # <6>
>>> vars(obj) # <7>
{'over': 8}
>>> obj.over # <8>
-> Overriding.__get__(<Overriding object>, <Managed object>, <class Managed>)
# END DESCR_KINDS_DEMO1
Overriding descriptor without ``__get__``:
(these tests are reproduced below without +ELLIPSIS directives for inclusion in the book;
look for DESCR_KINDS_DEMO2)
>>> obj.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
>>> Managed.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
>>> obj.over_no_get = 7
-> OverridingNoGet.__set__(<OverridingNoGet object>, <Managed object>, 7)
>>> obj.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
>>> obj.__dict__['over_no_get'] = 9
>>> obj.over_no_get
9
>>> obj.over_no_get = 7
-> OverridingNoGet.__set__(<OverridingNoGet object>, <Managed object>, 7)
>>> obj.over_no_get
9
Non-overriding descriptor (a.k.a. non-data descriptor or shadowable descriptor):
# BEGIN DESCR_KINDS_DEMO3
>>> obj = Managed()
>>> obj.non_over # <1>
-> NonOverriding.__get__(<NonOverriding object>, <Managed object>, <class Managed>)
>>> obj.non_over = 7 # <2>
>>> obj.non_over # <3>
7
>>> Managed.non_over # <4>
-> NonOverriding.__get__(<NonOverriding object>, None, <class Managed>)
>>> del obj.non_over # <5>
>>> obj.non_over # <6>
-> NonOverriding.__get__(<NonOverriding object>, <Managed object>, <class Managed>)
# END DESCR_KINDS_DEMO3
No descriptor type survives being overwritten on the class itself:
# BEGIN DESCR_KINDS_DEMO4
>>> obj = Managed() # <1>
>>> Managed.over = 1 # <2>
>>> Managed.over_no_get = 2
>>> Managed.non_over = 3
>>> obj.over, obj.over_no_get, obj.non_over # <3>
(1, 2, 3)
# END DESCR_KINDS_DEMO4
Methods are non-overriding descriptors:
>>> obj.spam # doctest: +ELLIPSIS
<bound method Managed.spam of <descriptorkinds.Managed object at 0x...>>
>>> Managed.spam # doctest: +ELLIPSIS
<function Managed.spam at 0x...>
>>> obj.spam()
-> Managed.spam(<Managed object>)
>>> Managed.spam()
Traceback (most recent call last):
...
TypeError: spam() missing 1 required positional argument: 'self'
>>> Managed.spam(obj)
-> Managed.spam(<Managed object>)
>>> Managed.spam.__get__(obj) # doctest: +ELLIPSIS
<bound method Managed.spam of <descriptorkinds.Managed object at 0x...>>
>>> obj.spam.__func__ is Managed.spam
True
>>> obj.spam = 7
>>> obj.spam
7
"""
"""
NOTE: These tests are here because I can't add callouts after +ELLIPSIS
directives and if doctest runs them without +ELLIPSIS I get test failures.
# BEGIN DESCR_KINDS_DEMO2
>>> obj.over_no_get # <1>
<__main__.OverridingNoGet object at 0x665bcc>
>>> Managed.over_no_get # <2>
<__main__.OverridingNoGet object at 0x665bcc>
>>> obj.over_no_get = 7 # <3>
-> OverridingNoGet.__set__(<OverridingNoGet object>, <Managed object>, 7)
>>> obj.over_no_get # <4>
<__main__.OverridingNoGet object at 0x665bcc>
>>> obj.__dict__['over_no_get'] = 9 # <5>
>>> obj.over_no_get # <6>
9
>>> obj.over_no_get = 7 # <7>
-> OverridingNoGet.__set__(<OverridingNoGet object>, <Managed object>, 7)
>>> obj.over_no_get # <8>
9
# END DESCR_KINDS_DEMO2
Methods are non-overriding descriptors:
# BEGIN DESCR_KINDS_DEMO5
>>> obj = Managed()
>>> obj.spam # <1>
<bound method Managed.spam of <descriptorkinds.Managed object at 0x74c80c>>
>>> Managed.spam # <2>
<function Managed.spam at 0x734734>
>>> obj.spam = 7 # <3>
>>> obj.spam
7
# END DESCR_KINDS_DEMO5
"""
# BEGIN DESCR_KINDS
### auxiliary functions for display only ###
def cls_name(obj_or_cls):
    """Short class name of obj_or_cls, accepting an instance or a class."""
    target = obj_or_cls if type(obj_or_cls) is type else type(obj_or_cls)
    return target.__name__.split('.')[-1]
def display(obj):
    """Compact repr used in the traces: classes as '<class X>', None and
    ints via repr, everything else as '<X object>'."""
    kind = type(obj)
    if kind is type:
        return '<class {}>'.format(obj.__name__)
    if kind in [type(None), int]:
        return repr(obj)
    return '<{} object>'.format(cls_name(obj))
def print_args(name, *args):
    """Print a one-line trace like '-> Cls.__get__(arg, ...)' for a
    descriptor dunder call; args[0] is the descriptor instance."""
    pseudo_args = ', '.join(display(x) for x in args)
    print('-> {}.__{}__({})'.format(cls_name(args[0]), name, pseudo_args))
### essential classes for this example ###
class Overriding: # <1>
    """a.k.a. data descriptor or enforced descriptor"""
    # Defines both __get__ and __set__, so it takes precedence over the
    # instance __dict__ for both reads and writes.
    def __get__(self, instance, owner):
        print_args('get', self, instance, owner) # <2>
    def __set__(self, instance, value):
        print_args('set', self, instance, value)
class OverridingNoGet: # <3>
    """an overriding descriptor without ``__get__``"""
    # __set__ alone still intercepts writes; reads fall back to the
    # instance __dict__ when an entry exists there.
    def __set__(self, instance, value):
        print_args('set', self, instance, value)
class NonOverriding: # <4>
    """a.k.a. non-data or shadowable descriptor"""
    # Only __get__: an instance attribute of the same name shadows it.
    def __get__(self, instance, owner):
        print_args('get', self, instance, owner)
class Managed: # <5>
    """Host class exercising all three descriptor kinds plus a plain method."""
    over = Overriding()
    over_no_get = OverridingNoGet()
    non_over = NonOverriding()
    def spam(self): # <6>
        # Functions are themselves non-overriding descriptors.
        print('-> Managed.spam({})'.format(display(self)))
# END DESCR_KINDS
"""
Overriding descriptor (a.k.a. data descriptor or enforced descriptor):
>>> obj = Model()
>>> obj.over # doctest: +ELLIPSIS
Overriding.__get__() invoked with args:
self = <descriptorkinds.Overriding object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
owner = <class 'descriptorkinds.Model'>
>>> Model.over # doctest: +ELLIPSIS
Overriding.__get__() invoked with args:
self = <descriptorkinds.Overriding object at 0x...>
instance = None
owner = <class 'descriptorkinds.Model'>
An overriding descriptor cannot be shadowed by assigning to an instance:
>>> obj = Model()
>>> obj.over = 7 # doctest: +ELLIPSIS
Overriding.__set__() invoked with args:
self = <descriptorkinds.Overriding object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
value = 7
>>> obj.over # doctest: +ELLIPSIS
Overriding.__get__() invoked with args:
self = <descriptorkinds.Overriding object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
owner = <class 'descriptorkinds.Model'>
Not even by poking the attribute into the instance ``__dict__``:
>>> obj.__dict__['over'] = 8
>>> obj.over # doctest: +ELLIPSIS
Overriding.__get__() invoked with args:
self = <descriptorkinds.Overriding object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
owner = <class 'descriptorkinds.Model'>
>>> vars(obj)
{'over': 8}
Overriding descriptor without ``__get__``:
>>> obj.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
>>> Model.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
>>> obj.over_no_get = 7 # doctest: +ELLIPSIS
OverridingNoGet.__set__() invoked with args:
self = <descriptorkinds.OverridingNoGet object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
value = 7
>>> obj.over_no_get # doctest: +ELLIPSIS
<descriptorkinds.OverridingNoGet object at 0x...>
Poking the attribute into the instance ``__dict__`` means you can read the new
value for the attribute, but setting it still triggers ``__set__``:
>>> obj.__dict__['over_no_get'] = 9
>>> obj.over_no_get
9
>>> obj.over_no_get = 7 # doctest: +ELLIPSIS
OverridingNoGet.__set__() invoked with args:
self = <descriptorkinds.OverridingNoGet object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
value = 7
>>> obj.over_no_get
9
Non-overriding descriptor (a.k.a. non-data descriptor or shadowable descriptor):
>>> obj = Model()
>>> obj.non_over # doctest: +ELLIPSIS
NonOverriding.__get__() invoked with args:
self = <descriptorkinds.NonOverriding object at 0x...>
instance = <descriptorkinds.Model object at 0x...>
owner = <class 'descriptorkinds.Model'>
>>> Model.non_over # doctest: +ELLIPSIS
NonOverriding.__get__() invoked with args:
self = <descriptorkinds.NonOverriding object at 0x...>
instance = None
owner = <class 'descriptorkinds.Model'>
A non-overriding descriptor can be shadowed by assigning to an instance:
>>> obj.non_over = 7
>>> obj.non_over
7
Methods are non-over descriptors:
>>> obj.spam # doctest: +ELLIPSIS
<bound method Model.spam of <descriptorkinds.Model object at 0x...>>
>>> Model.spam # doctest: +ELLIPSIS
<function Model.spam at 0x...>
>>> obj.spam() # doctest: +ELLIPSIS
Model.spam() invoked with arg:
self = <descriptorkinds.Model object at 0x...>
>>> obj.spam = 7
>>> obj.spam
7
No descriptor type survives being overwritten on the class itself:
>>> Model.over = 1
>>> obj.over
1
>>> Model.over_no_get = 2
>>> obj.over_no_get
2
>>> Model.non_over = 3
>>> obj.non_over
7
"""
# BEGIN DESCRIPTORKINDS
def print_args(name, *args): # <1>
    """Trace helper: report which descriptor dunder ran and with what
    arguments, one labelled line per argument."""
    owner_cls = args[0].__class__.__name__
    labels = ['self', 'instance', 'owner']
    if name == 'set':
        # __set__ receives a value where __get__ receives the owner class.
        labels[-1] = 'value'
    print('{}.__{}__() invoked with args:'.format(owner_cls, name))
    for label, arg in zip(labels, args):
        print(' {:8} = {}'.format(label, arg))
class Overriding: # <2>
    """a.k.a. data descriptor or enforced descriptor"""
    # Both __get__ and __set__ defined: overrides the instance __dict__.
    def __get__(self, instance, owner):
        print_args('get', self, instance, owner) # <3>
    def __set__(self, instance, value):
        print_args('set', self, instance, value)
class OverridingNoGet: # <4>
    """an overriding descriptor without ``__get__``"""
    # Writes are intercepted; reads can be satisfied by the instance __dict__.
    def __set__(self, instance, value):
        print_args('set', self, instance, value)
class NonOverriding: # <5>
    """a.k.a. non-data or shadowable descriptor"""
    # Only __get__: an instance attribute of the same name shadows it.
    def __get__(self, instance, owner):
        print_args('get', self, instance, owner)
class Model: # <6>
    """Host class exercising the three descriptor kinds plus a plain method."""
    over = Overriding()
    over_no_get = OverridingNoGet()
    non_over = NonOverriding()
    def spam(self): # <7>
        print('Model.spam() invoked with arg:')
        print(' self =', self)
#END DESCRIPTORKINDS
"""
# BEGIN FUNC_DESCRIPTOR_DEMO
>>> word = Text('forward')
>>> word # <1>
Text('forward')
>>> word.reverse() # <2>
Text('drawrof')
>>> Text.reverse(Text('backward')) # <3>
Text('drawkcab')
>>> type(Text.reverse), type(word.reverse) # <4>
(<class 'function'>, <class 'method'>)
>>> list(map(Text.reverse, ['repaid', (10, 20, 30), Text('stressed')])) # <5>
['diaper', (30, 20, 10), Text('desserts')]
>>> Text.reverse.__get__(word) # <6>
<bound method Text.reverse of Text('forward')>
>>> Text.reverse.__get__(None, Text) # <7>
<function Text.reverse at 0x101244e18>
>>> word.reverse # <8>
<bound method Text.reverse of Text('forward')>
>>> word.reverse.__self__ # <9>
Text('forward')
>>> word.reverse.__func__ is Text.reverse # <10>
True
# END FUNC_DESCRIPTOR_DEMO
"""
# BEGIN FUNC_DESCRIPTOR_EX
import collections
class Text(collections.UserString):
    """UserString subclass whose methods demonstrate function descriptors."""

    def reverse(self):
        """Return a reversed copy; slicing a UserString preserves the type."""
        return self[::-1]

    def __repr__(self):
        return 'Text({!r})'.format(self.data)
# END FUNC_DESCRIPTOR_EX
# %load ./bulkfood/bulkfood_v3.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
Negative or 0 price is not acceptable either::
>>> truffle = LineItem('White truffle', 100, 0)
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
"""
# BEGIN LINEITEM_V3
class Quantity: # <1>
    """Descriptor that stores a value on the managed instance, rejecting
    any value that is not strictly positive."""

    def __init__(self, storage_name):
        self.storage_name = storage_name # <2>

    def __set__(self, instance, value): # <3>
        # Guard clause; `not value > 0` keeps the original rejection set.
        if not value > 0:
            raise ValueError('value must be > 0')
        # Writing straight into instance.__dict__ avoids recursing into __set__.
        instance.__dict__[self.storage_name] = value # <4>


class LineItem:
    """One line of a bulk-food order; weight and price are validated Quantities."""

    weight = Quantity('weight') # <5>
    price = Quantity('price') # <6>

    def __init__(self, description, weight, price): # <7>
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        return self.weight * self.price
# END LINEITEM_V3
# %load ./bulkfood/bulkfood_v4.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
The value of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['_Quantity#0', '_Quantity#1', '__class__', ...
'description', 'price', 'subtotal', 'weight']
>>> getattr(raisins, '_Quantity#0')
10
>>> getattr(raisins, '_Quantity#1')
6.95
"""
# BEGIN LINEITEM_V4
class Quantity:
    """Descriptor that manages a strictly positive numeric attribute.

    Each ``Quantity`` instance computes its own storage name (e.g.
    ``_Quantity#0``) from a class-wide counter, so several descriptors can
    coexist in one managed class without clashing.
    """

    __counter = 0  # <1> counts Quantity instances; shared by the class

    def __init__(self):
        cls = self.__class__  # <2>
        prefix = cls.__name__
        index = cls.__counter
        # Unique per-descriptor attribute name in each managed instance;
        # '#' keeps it from colliding with normal identifiers.  # <3>
        self.storage_name = '_{}#{}'.format(prefix, index)
        cls.__counter += 1  # <4>

    def __get__(self, instance, owner):  # <5>
        if instance is None:
            # Bug fix: class-level access (e.g. ``LineItem.weight``) used
            # to call getattr(None, ...) and raise AttributeError; return
            # the descriptor itself instead, as introspection expects.
            return self
        return getattr(instance, self.storage_name)  # <6>

    def __set__(self, instance, value):
        if value > 0:
            setattr(instance, self.storage_name, value)  # <7>
        else:
            raise ValueError('value must be > 0')
class LineItem:
    """One line of a bulk-food order with validated weight and price."""

    # No explicit storage names needed: each Quantity generates its own.  # <8>
    weight = Quantity()
    price = Quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        # Total cost for this line item.
        return self.price * self.weight
# END LINEITEM_V4
# %load ./bulkfood/bulkfood_v4b.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
The value of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['_Quantity#0', '_Quantity#1', '__class__', ...
'description', 'price', 'subtotal', 'weight']
>>> getattr(raisins, '_Quantity#0')
10
>>> getattr(raisins, '_Quantity#1')
6.95
If the descriptor is accessed in the class, the descriptor object is
returned:
>>> LineItem.weight # doctest: +ELLIPSIS
<bulkfood_v4b.Quantity object at 0x...>
>>> LineItem.weight.storage_name
'_Quantity#0'
"""
# BEGIN LINEITEM_V4B
class Quantity:
    """Descriptor validating that managed values are strictly positive.

    Values live in per-instance attributes named ``_Quantity#<n>``, where
    ``<n>`` comes from a class-wide counter of descriptor instances.
    """

    __counter = 0  # next storage-slot index to hand out

    def __init__(self):
        cls = type(self)
        self.storage_name = '_{}#{}'.format(cls.__name__, cls.__counter)
        cls.__counter += 1

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself so that
        # introspection (and doctests) can inspect it.  # <1> <2>
        if instance is None:
            return self
        return getattr(instance, self.storage_name)

    def __set__(self, instance, value):
        if not value > 0:
            raise ValueError('value must be > 0')
        setattr(instance, self.storage_name, value)
# END LINEITEM_V4B
class LineItem:
    """One line of a bulk-food order with validated weight and price."""

    # Each Quantity descriptor allocates its own hidden storage slot.
    weight = Quantity()
    price = Quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        # Total cost for this line item.
        return self.price * self.weight
# %load ./bulkfood/bulkfood_v4c.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
The value of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['_Quantity#0', '_Quantity#1', '__class__', ...
'description', 'price', 'subtotal', 'weight']
>>> getattr(raisins, '_Quantity#0')
10
>>> getattr(raisins, '_Quantity#1')
6.95
If the descriptor is accessed in the class, the descriptor object is
returned:
>>> LineItem.weight # doctest: +ELLIPSIS
<model_v4c.Quantity object at 0x...>
>>> LineItem.weight.storage_name
'_Quantity#0'
"""
# BEGIN LINEITEM_V4C
import model_v4c as model # <1>
class LineItem:
    """Order line whose weight and price are validated by descriptors
    imported from an external model module."""

    # Reusing the descriptor class from the model module.  # <2>
    weight = model.Quantity()
    price = model.Quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        # Total cost for this line item.
        return self.price * self.weight
# END LINEITEM_V4C
# %load ./bulkfood/bulkfood_v4prop.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
The value of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[... '_quantity:0', '_quantity:1', 'description',
'price', 'subtotal', 'weight']
>>> getattr(raisins, '_quantity:0')
10
>>> getattr(raisins, '_quantity:1')
6.95
"""
# BEGIN LINEITEM_V4_PROP
def quantity():  # <1>
    """Return a property that only accepts strictly positive values.

    A counter kept as an attribute on this function itself numbers each
    property so every one gets a distinct storage slot (``_quantity:0``,
    ``_quantity:1``, ...) on the managed instance.
    """
    # First call: no counter yet, start at 0; afterwards, increment.  # <2> <3>
    quantity.counter = getattr(quantity, 'counter', -1) + 1
    storage_name = '_{}:{}'.format('quantity', quantity.counter)  # <4>

    def qty_getter(instance):  # <5>
        return getattr(instance, storage_name)

    def qty_setter(instance, value):
        if not value > 0:
            raise ValueError('value must be > 0')
        setattr(instance, storage_name, value)

    return property(qty_getter, qty_setter)
# END LINEITEM_V4_PROP
class LineItem:
    """Order line using property factories instead of descriptors."""

    # Each call to quantity() builds an independent validating property.
    weight = quantity()
    price = quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        # Total cost for this line item.
        return self.price * self.weight
# %load ./bulkfood/model_v4c.py
# BEGIN MODEL_V4
class Quantity:
    """Reusable descriptor that enforces strictly positive values.

    Storage names (``_Quantity#<n>``) are generated from a class-wide
    counter, so multiple descriptors never share a slot.
    """

    __counter = 0  # next storage-slot index to hand out

    def __init__(self):
        cls = type(self)
        self.storage_name = '_{}#{}'.format(cls.__name__, cls.__counter)
        cls.__counter += 1

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself for
        # introspection; instance access reads the hidden slot.
        if instance is None:
            return self
        return getattr(instance, self.storage_name)

    def __set__(self, instance, value):
        if not value > 0:
            raise ValueError('value must be > 0')
        setattr(instance, self.storage_name, value)
# END MODEL_V4
# %load ./bulkfood/bulkfood_v5.py
"""
A line item for a bulk food order has description, weight and price fields::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> raisins.weight, raisins.description, raisins.price
(10, 'Golden raisins', 6.95)
A ``subtotal`` method gives the total price for that line item::
>>> raisins.subtotal()
69.5
The weight of a ``LineItem`` must be greater than 0::
>>> raisins.weight = -20
Traceback (most recent call last):
...
ValueError: value must be > 0
No change was made::
>>> raisins.weight
10
The value of the attributes managed by the descriptors are stored in
alternate attributes, created by the descriptors in each ``LineItem``
instance::
>>> raisins = LineItem('Golden raisins', 10, 6.95)
>>> dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['_NonBlank#0', '_Quantity#0', '_Quantity#1', '__class__', ...
'description', 'price', 'subtotal', 'weight']
>>> getattr(raisins, '_Quantity#0')
10
>>> getattr(raisins, '_NonBlank#0')
'Golden raisins'
If the descriptor is accessed in the class, the descriptor object is
returned:
>>> LineItem.weight # doctest: +ELLIPSIS
<model_v5.Quantity object at 0x...>
>>> LineItem.weight.storage_name
'_Quantity#0'
The `NonBlank` descriptor prevents empty or blank strings to be used
for the description:
>>> br_nuts = LineItem('Brazil Nuts', 10, 34.95)
>>> br_nuts.description = ' '
Traceback (most recent call last):
...
ValueError: value cannot be empty or blank
>>> void = LineItem('', 1, 1)
Traceback (most recent call last):
...
ValueError: value cannot be empty or blank
"""
# BEGIN LINEITEM_V5
import model_v5 as model # <1>
class LineItem:
    """Order line with a non-blank description and positive quantities,
    all validated by descriptors from the external model module."""

    description = model.NonBlank()  # <2> rejects empty/blank strings
    weight = model.Quantity()
    price = model.Quantity()

    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        # Total cost for this line item.
        return self.price * self.weight
# END LINEITEM_V5
```
| github_jupyter |
<div class="alert alert-block alert-info" style="margin-top: 20px">
<a href="https://cocl.us/PY0101EN_edx_add_top">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png" width="750" align="center">
</a>
</div>
<a href="https://cognitiveclass.ai/">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center">
</a>
<h1>Lists in Python</h1>
<p><strong>Welcome!</strong> This notebook will teach you about lists in the Python Programming Language. By the end of this lab, you'll know the basic list operations in Python, including indexing, list operations, and copying/cloning lists.</p>
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ul>
<li>
<a href="#dataset">About the Dataset</a>
</li>
<li>
<a href="#list">Lists</a>
<ul>
<li><a href="#index">Indexing</a></li>
<li><a href="#content">List Content</a></li>
<li><a href="#op">List Operations</a></li>
<li><a href="#co">Copy and Clone List</a></li>
</ul>
</li>
<li>
<a href="#quiz">Quiz on Lists</a>
</li>
</ul>
<p>
Estimated time needed: <strong>15 min</strong>
</p>
</div>
<hr>
<h2 id="dataset">About the Dataset</h2>
Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album.
The table has one row for each album and several columns:
- **artist** - Name of the artist
- **album** - Name of the album
- **released_year** - Year the album was released
- **length_min_sec** - Length of the album (hours,minutes,seconds)
- **genre** - Genre of the album
- **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
- **date_released** - Date on which the album was released
- **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N)
- **rating_of_friends** - Indicates the rating from your friends from 1 to 10
<br>
<br>
The dataset can be seen below:
<font size="1">
<table font-size:xx-small style="width:100%">
<tr>
<th>Artist</th>
<th>Album</th>
<th>Released</th>
<th>Length</th>
<th>Genre</th>
<th>Music recording sales (millions)</th>
<th>Claimed sales (millions)</th>
<th>Released</th>
<th>Soundtrack</th>
<th>Rating (friends)</th>
</tr>
<tr>
<td>Michael Jackson</td>
<td>Thriller</td>
<td>1982</td>
<td>00:42:19</td>
<td>Pop, rock, R&B</td>
<td>46</td>
<td>65</td>
<td>30-Nov-82</td>
<td></td>
<td>10.0</td>
</tr>
<tr>
<td>AC/DC</td>
<td>Back in Black</td>
<td>1980</td>
<td>00:42:11</td>
<td>Hard rock</td>
<td>26.1</td>
<td>50</td>
<td>25-Jul-80</td>
<td></td>
<td>8.5</td>
</tr>
<tr>
<td>Pink Floyd</td>
<td>The Dark Side of the Moon</td>
<td>1973</td>
<td>00:42:49</td>
<td>Progressive rock</td>
<td>24.2</td>
<td>45</td>
<td>01-Mar-73</td>
<td></td>
<td>9.5</td>
</tr>
<tr>
<td>Whitney Houston</td>
<td>The Bodyguard</td>
<td>1992</td>
<td>00:57:44</td>
<td>Soundtrack/R&B, soul, pop</td>
<td>26.1</td>
<td>50</td>
<td>25-Jul-80</td>
<td>Y</td>
<td>7.0</td>
</tr>
<tr>
<td>Meat Loaf</td>
<td>Bat Out of Hell</td>
<td>1977</td>
<td>00:46:33</td>
<td>Hard rock, progressive rock</td>
<td>20.6</td>
<td>43</td>
<td>21-Oct-77</td>
<td></td>
<td>7.0</td>
</tr>
<tr>
<td>Eagles</td>
<td>Their Greatest Hits (1971-1975)</td>
<td>1976</td>
<td>00:43:08</td>
<td>Rock, soft rock, folk rock</td>
<td>32.2</td>
<td>42</td>
<td>17-Feb-76</td>
<td></td>
<td>9.5</td>
</tr>
<tr>
<td>Bee Gees</td>
<td>Saturday Night Fever</td>
<td>1977</td>
<td>1:15:54</td>
<td>Disco</td>
<td>20.6</td>
<td>40</td>
<td>15-Nov-77</td>
<td>Y</td>
<td>9.0</td>
</tr>
<tr>
<td>Fleetwood Mac</td>
<td>Rumours</td>
<td>1977</td>
<td>00:40:01</td>
<td>Soft rock</td>
<td>27.9</td>
<td>40</td>
<td>04-Feb-77</td>
<td></td>
<td>9.5</td>
</tr>
</table></font>
<hr>
<h2 id="list">Lists</h2>
<h3 id="index">Indexing</h3>
We are going to take a look at lists in Python. A list is a sequenced collection of different objects such as integers, strings, and other lists as well. The address of each element within a list is called an <b>index</b>. An index is used to access and refer to items within a list.
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsIndex.png" width="1000" />
To create a list, type the list within square brackets <b>[ ]</b>, with your content inside the brackets and separated by commas. Let’s try it!
```
# Create a list
L = ["Michael Jackson", 10.1, 1982]
L
```
We can use negative and regular indexing with a list :
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsNeg.png" width="1000" />
```
# Print the elements on each index
print('the same element using negative and positive indexing:\n Postive:',L[0],
'\n Negative:' , L[-3] )
print('the same element using negative and positive indexing:\n Postive:',L[1],
'\n Negative:' , L[-2] )
print('the same element using negative and positive indexing:\n Postive:',L[2],
'\n Negative:' , L[-1] )
```
<h3 id="content">List Content</h3>
Lists can contain strings, floats, and integers. We can nest other lists, and we can also nest tuples and other data structures. The same indexing conventions apply for nesting:
```
# Sample List
["Michael Jackson", 10.1, 1982, [1, 2], ("A", 1)]
```
<h3 id="op">List Operations</h3>
We can also perform slicing in lists. For example, if we want the last two elements, we use the following command:
```
# Sample List
L = ["Michael Jackson", 10.1,1982,"MJ",1]
L
```
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsSlice.png" width="1000">
```
# List slicing
L[3:5]
```
We can use the method <code>extend</code> to add new elements to the list:
```
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
```
Another similar method is <code>append</code>. If we apply <code>append</code> instead of <code>extend</code>, we add one element to the list:
```
# Use append to add elements to list
L = [ "Michael Jackson", 10.2]
L.append(['pop', 10])
L
```
Each time we apply a method, the list changes. If we apply <code>extend</code> we add two new elements to the list. The list <code>L</code> is then modified by adding two new elements:
```
# Use extend to add elements to list
L = [ "Michael Jackson", 10.2]
L.extend(['pop', 10])
L
```
If we append the list <code>['a','b']</code> we have one new element consisting of a nested list:
```
# Use append to add elements to list
L.append(['a','b'])
L
```
As lists are mutable, we can change them. For example, we can change the first element as follows:
```
# Change the element based on the index
A = ["disco", 10, 1.2]
print('Before change:', A)
A[0] = 'hard rock'
print('After change:', A)
```
We can also delete an element of a list using the <code>del</code> command:
```
# Delete the element based on the index
print('Before change:', A)
del(A[0])
print('After change:', A)
```
We can convert a string to a list using <code>split</code>. For example, the method <code>split</code> translates every group of characters separated by a space into an element in a list:
```
# Split the string, default is by space
'hard rock'.split()
```
We can use the split function to separate strings on a specific character. We pass the character we would like to split on into the argument, which in this case is a comma. The result is a list, and each element corresponds to a set of characters that have been separated by a comma:
```
# Split the string by comma
'A,B,C,D'.split(',')
```
<h3 id="co">Copy and Clone List</h3>
When we set one variable <b>B</b> equal to <b>A</b>; both <b>A</b> and <b>B</b> are referencing the same list in memory:
```
# Copy (copy by reference) the list A
A = ["hard rock", 10, 1.2]
B = A
print('A:', A)
print('B:', B)
```
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRef.png" width="1000" align="center">
Initially, the value of the first element in <b>B</b> is set as hard rock. If we change the first element in <b>A</b> to <b>banana</b>, we get an unexpected side effect. As <b>A</b> and <b>B</b> are referencing the same list, if we change list <b>A</b>, then list <b>B</b> also changes. If we check the first element of <b>B</b> we get banana instead of hard rock:
```
# Examine the copy by reference
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0])
```
This is demonstrated in the following figure:
<img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRefGif.gif" width="1000" />
You can clone list **A** by using the following syntax:
```
# Clone (clone by value) the list A
B = A[:]
B
```
Variable **B** references a new copy or clone of the original list; this is demonstrated in the following figure:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsVal.gif" width="1000" />
Now if you change <b>A</b>, <b>B</b> will not change:
```
print('B[0]:', B[0])
A[0] = "hard rock"
print('B[0]:', B[0])
```
<h2 id="quiz">Quiz on List</h2>
Create a list <code>a_list</code>, with the following elements <code>1</code>, <code>hello</code>, <code>[1,2,3]</code> and <code>True</code>.
```
# Write your code below and press Shift+Enter to execute
a_list=[1, 'hello', [1,2,3],True]
a_list
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list = [1, 'hello', [1, 2, 3] , True]
a_list
-->
Find the value stored at index 1 of <code>a_list</code>.
```
# Write your code below and press Shift+Enter to execute
a_list[1]
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list[1]
-->
Retrieve the elements stored at index 1, 2 and 3 of <code>a_list</code>.
```
# Write your code below and press Shift+Enter to execute
a_list[1:4]
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
a_list[1:4]
-->
Concatenate the following lists <code>A = [1, 'a']</code> and <code>B = [2, 1, 'd']</code>:
```
# Write your code below and press Shift+Enter to execute
A = [1, 'a']
B = [2, 1, 'd']
A=A+B
A
```
Double-click <b>here</b> for the solution.
<!-- Your answer is below:
A = [1, 'a']
B = [2, 1, 'd']
A + B
-->
<hr>
<h2>The last exercise!</h2>
<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
<hr>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<h2>Get IBM Watson Studio free of charge!</h2>
<p><a href="https://cocl.us/PY0101EN_edx_add_bbottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p>
</div>
<h3>About the Authors:</h3>
<p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
<hr>
<p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Natural-Language-Pre-Processing" data-toc-modified-id="Natural-Language-Pre-Processing-1"><span class="toc-item-num">1 </span>Natural Language Pre-Processing</a></span></li><li><span><a href="#Objectives" data-toc-modified-id="Objectives-2"><span class="toc-item-num">2 </span>Objectives</a></span></li><li><span><a href="#Overview-of-NLP" data-toc-modified-id="Overview-of-NLP-3"><span class="toc-item-num">3 </span>Overview of NLP</a></span></li><li><span><a href="#Preprocessing-for-NLP" data-toc-modified-id="Preprocessing-for-NLP-4"><span class="toc-item-num">4 </span>Preprocessing for NLP</a></span><ul class="toc-item"><li><span><a href="#Tokenization" data-toc-modified-id="Tokenization-4.1"><span class="toc-item-num">4.1 </span>Tokenization</a></span></li></ul></li><li><span><a href="#Text-Cleaning" data-toc-modified-id="Text-Cleaning-5"><span class="toc-item-num">5 </span>Text Cleaning</a></span><ul class="toc-item"><li><span><a href="#Capitalization" data-toc-modified-id="Capitalization-5.1"><span class="toc-item-num">5.1 </span>Capitalization</a></span></li><li><span><a href="#Punctuation" data-toc-modified-id="Punctuation-5.2"><span class="toc-item-num">5.2 </span>Punctuation</a></span></li><li><span><a href="#Stopwords" data-toc-modified-id="Stopwords-5.3"><span class="toc-item-num">5.3 </span>Stopwords</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Numerals" data-toc-modified-id="Numerals-5.3.0.1"><span class="toc-item-num">5.3.0.1 </span>Numerals</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Regex" data-toc-modified-id="Regex-6"><span class="toc-item-num">6 </span>Regex</a></span><ul class="toc-item"><li><span><a href="#RegexpTokenizer()" data-toc-modified-id="RegexpTokenizer()-6.1"><span class="toc-item-num">6.1 </span><code>RegexpTokenizer()</code></a></span></li></ul></li><li><span><a href="#Exercise:-NL-Pre-Processing" 
data-toc-modified-id="Exercise:-NL-Pre-Processing-7"><span class="toc-item-num">7 </span>Exercise: NL Pre-Processing</a></span></li></ul></div>
# Natural Language Pre-Processing
```
# Use this to install nltk if needed
!pip install nltk
# !conda install -c anaconda nltk
%load_ext autoreload
%autoreload 2
import os
import sys
module_path = os.path.abspath(os.path.join(os.pardir, os.pardir))
if module_path not in sys.path:
sys.path.append(module_path)
import pandas as pd
import nltk
from nltk.probability import FreqDist
from nltk.corpus import stopwords
from nltk.tokenize import regexp_tokenize, word_tokenize, RegexpTokenizer
import matplotlib.pyplot as plt
import string
import re
# Use this to download the stopwords if you haven't already - only ever needs to be run once
nltk.download("stopwords")
```
# Objectives
- Describe the basic concepts of NLP
- Use pre-processing methods for NLP
- Tokenization
- Stopwords removal
# Overview of NLP
NLP allows computers to interact with text data in a structured and sensible way. In short, we will be breaking up series of texts into individual words (or groups of words), and isolating the words with **semantic value**. We will then compare texts with similar distributions of these words, and group them together.
In this section, we will discuss some steps and approaches to common text data analytic procedures. Some of the applications of natural language processing are:
- Chatbots
- Speech recognition and audio processing
- Classifying documents
Here is an example that uses some of the tools we use in this notebook.
-[chicago_justice classifier](https://github.com/chicago-justice-project/article-tagging/blob/master/lib/notebooks/bag-of-words-count-stemmed-binary.ipynb)
We will introduce you to the preprocessing steps, feature engineering, and other steps you need to take in order to format text data for machine learning tasks.
We will also introduce you to [**NLTK**](https://www.nltk.org/) (Natural Language Toolkit), which will be our main tool for engaging with textual data.
<img src="img/nlp_process.png" style="width:1000px;">
```
#No hard rule for model, could be knn, rfc, etc.
```
# Preprocessing for NLP
```
#Curse of dimensionality
```
The goal when pre-processing text data for NLP is to remove as many unnecessary words as possible while preserving as much semantic meaning as possible. This will improve your model performance dramatically.
You can think of this sort of like dimensionality reduction. The unique words in your corpus form a **vocabulary**, and each word in your vocabulary is essentially another feature in your model. So we want to get rid of unnecessary words and consolidate words that have similar meanings.
We will be working with a dataset which includes both satirical (The Onion) and real news (Reuters) articles. We refer to the entire set of articles as the **corpus**.
 
```
corpus = pd.read_csv('data/satire_nosatire.csv')
corpus.shape
corpus.tail()
```
Our goal is to detect satire, so our target class of 1 is associated with The Onion articles.
```
corpus.loc[10].body
corpus.loc[10].target
corpus.loc[502].body
corpus.loc[502].target
```
Each article in the corpus is referred to as a **document**.
It is a balanced dataset with 500 documents of each category.
```
corpus.target.value_counts()
```
**Discussion:** Let's think about the use cases of being able to correctly separate satirical from authentic news. What might be a real-world use case?
```
# Thoughts here
```
## Tokenization
In order to convert the texts into data suitable for machine learning, we need to break down the documents into smaller parts.
The first step in doing that is **tokenization**.
Tokenization is the process of splitting documents into units of observation. We usually represent the tokens as __n-grams__, where n represents the number of consecutive words occurring in a document that we will consider a unit. In the case of unigrams (one-word tokens), the sentence "David works here" would be tokenized into:
- "David", "works", "here";
If we want (also) to consider bigrams, we would (also) consider:
- "David works" and "works here".
Let's consider the first document in our corpus:
```
first_document = corpus.iloc[0].body
first_document
sample_document = corpus.iloc[1].body
sample_document
```
There are many ways to tokenize our document.
It is a long string, so the first way we might consider is to split it by spaces.
**Knowledge Check:** How would we split our documents into words using spaces?
<p>
</p>
<details>
<summary><b><u>Click Here for Answer Code</u></b></summary>
first_document.split(' ')
</details>
```
# code
sample_document.split()
```
But this is not ideal. We are trying to create a set of tokens with **high semantic value**. In other words, we want to isolate text which best represents the meaning in each document.
# Text Cleaning
Most NL Pre-Processing will include the following tasks:
1. Remove capitalization
2. Remove punctuation
3. Remove stopwords
4. Remove numbers
We could manually perform all of these tasks with string operations.
## Capitalization
When we create our matrix of words associated with our corpus, **capital letters** will mess things up. The semantic value of a word used at the beginning of a sentence is the same as that same word in the middle of the sentence. In the two sentences:
sentence_one = "Excessive gerrymandering in small counties suppresses turnout."
sentence_two = "Turnout is suppressed in small counties by excessive gerrymandering."
'excessive' has the same semantic value, but will be treated as different tokens because of capitals.
```
sentence_one = "Excessive gerrymandering in small counties suppresses turnout."
sentence_two = "Turnout is suppressed in small counties by excessive gerrymandering."
Excessive = sentence_one.split(' ')[0]
excessive = sentence_two.split(' ')[-2]
print(excessive, Excessive)
excessive == Excessive
manual_cleanup = [word.lower() for word in first_document.split(' ')]
print(f"Our initial token set for our first document is {len(manual_cleanup)} words long")
print(f"Our initial token set for our first document has \
{len(set(first_document.split()))} unique words")
print(f"After removing capitals, our first document has \
{len(set(manual_cleanup))} unique words")
```
## Punctuation
Like capitals, splitting on white space will create tokens which include punctuation that will muck up our semantics.
Returning to the above example, 'gerrymandering' and 'gerrymandering.' will be treated as different tokens.
```
no_punct = sentence_one.split(' ')[1]
punct = sentence_two.split(' ')[-1]
print(no_punct, punct)
no_punct == punct
## Manual removal of punctuation
string.punctuation
manual_cleanup = [s.translate(str.maketrans('', '', string.punctuation))\
for s in manual_cleanup]
print(f"After removing punctuation, our first document has \
{len(set(manual_cleanup))} unique words")
manual_cleanup[:10]
```
## Stopwords
Stopwords are the **filler** words in a language: prepositions, articles, conjunctions. They have low semantic value, and often need to be removed.
Luckily, NLTK has lists of stopwords ready for our use.
```
stopwords.words('english')[:10]
stopwords.words('greek')[:10]
```
Let's see which stopwords are present in our first document.
```
stops = [token for token in manual_cleanup if token in stopwords.words('english')]
stops[:10]
print(f'There are {len(stops)} stopwords in the first document')
print(f'That is {len(stops)/len(manual_cleanup): 0.2%} of our text')
```
Let's also use the **FreqDist** tool to look at the makeup of our text before and after removal:
```
fdist = FreqDist(manual_cleanup)
plt.figure(figsize=(10, 10))
fdist.plot(30);
manual_cleanup = [token for token in manual_cleanup if\
token not in stopwords.words('english')]
manual_cleanup[:10]
# We can also customize our stopwords list
custom_sw = stopwords.words('english')
custom_sw.extend(["i'd","say"] )
custom_sw[-10:]
manual_cleanup = [token for token in manual_cleanup if token not in custom_sw]
print(f'After removing stopwords, there are {len(set(manual_cleanup))} unique words left')
fdist = FreqDist(manual_cleanup)
plt.figure(figsize=(10, 10))
fdist.plot(30);
```
#### Numerals
Numerals also usually have low semantic value. Their removal can help improve our models.
To remove them, we will use regular expressions, a powerful tool which you may already have some familiarity with.
```
manual_cleanup = [s.translate(str.maketrans('', '', '0123456789')) \
for s in manual_cleanup]
# drop empty strings
manual_cleanup = [s for s in manual_cleanup if s != '' ]
print(f'After removing numbers, there are {len(set(manual_cleanup))} unique words left')
```
# Regex
Regex allows us to match strings based on a pattern. This pattern comes from a language of identifiers, which we can begin exploring on the cheatsheet found here:
- https://regexr.com/
A few key symbols:
- . : matches any character
- \d, \w, \s : represent digit, word, whitespace
- *, ?, +: matches 0 or more, 0 or 1, 1 or more of the preceding character
- [A-Z]: matches any capital letter
- [a-z]: matches lowercase letter
Other helpful resources:
- https://regexcrossword.com/
- https://www.regular-expressions.info/tutorial.html
We can use regex to isolate numerals:
```
first_document
pattern = '[0-9]'
number = re.findall(pattern, first_document)
number
pattern2 = '[0-9]+'
number2 = re.findall(pattern2, first_document)
number2
```
## `RegexpTokenizer()`
Sklearn and NLTK provide us with a suite of **tokenizers** for our text preprocessing convenience.
```
first_document
# Remember that the '?' indicates 0 or 1 of what follows!
re.findall(r"([a-zA-Z]+(?:'[a-z]+)?)", "I'd")
pattern = "([a-zA-Z]+(?:'[a-z]+)?)"
tokenizer = RegexpTokenizer(pattern)
first_doc = tokenizer.tokenize(first_document)
first_doc = [token.lower() for token in first_doc]
first_doc = [token for token in first_doc if token not in custom_sw]
first_document
first_doc[:10]
print(f'We are down to {len(set(first_doc))} unique words')
```
# Exercise: NL Pre-Processing
**Activity:** Use what you've learned to preprocess the second article. How does the length and number of unique words in the article change?
<p>
</p>
<details>
<summary><b><u>Click Here for Answer Code</u></b></summary>
second_document = corpus.iloc[1].body
print(f'We start with {len(second_document.split())} words')
print(f'We start with {len(set(second_document.split()))} unique words')
second_doc = tokenizer.tokenize(second_document)
second_doc = [token.lower() for token in second_doc]
second_doc = [token for token in second_doc if token not in custom_sw]
print(f'We end with {len(second_doc)} words')
print(f'We end with {len(set(second_doc))} unique words')
</details>
```
second_document
len(set(corpus.iloc[1].body.split()))
list(set(corpus.iloc[1].body.split()))
len(second_document)
list(set(second_document))
second_doc
## Your code here
second_document = corpus.iloc[1].body
second_doc = tokenizer.tokenize(second_document)
second_doc = [token.lower() for token in second_doc]
second_doc = [token for token in second_doc if token not in custom_sw]
#second_doc[:10], print(f'We are down to {len(second_doc)} words'),\
#print(f'We are down to {len(set(second_doc))} unique words')
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Знакомимся с переобучением и недообучением
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/overfit_and_underfit"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Читай на TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ru/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Запусти в Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ru/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Изучай код на GitHub</a>
</td>
</table>
Как и в предыдущий раз мы будем использовать `tf.keras` API, подробнее о котором ты можешь прочитать в нашем [руководстве по Keras](https://www.tensorflow.org/guide/keras).
В обоих предыдущих примерах с классификацией обзоров фильмов и предсказанием цен на жилье, мы увидели, что точность нашей модели на проверочных данных достигает пика после определенного количества эпох, а затем начинает снижаться.
Другими словами, наша модель учится на одних и тех же данных слишком долго - это называется *переобучение*. Очень важно знать способы как можно предотвратить это. Несмотря на то, что при помощи переобучения можно достичь более высоких показателей точности, но только на *тренировочных данных*, нашей целью всегда является обучить нейросеть обобщать их и узнавать паттерны на проверочных, новых данных.
Обратным случаем переобучения является *недообучение*: оно возникает когда все еще есть возможность улучшить показатели модели на проверочном наборе данных. Недообучение может произойти по разным причинам: например, если модель недостаточно сильная, или слишком сложная, или просто недостаточно тренировалась на данных. В любом случае это будет означать, что не были выучены основные паттерны из проверочного сета.
Если ты будешь тренировать модель слишком долго, то модель начнет обучаться шаблонам, которые свойственны *только* тренировочным данным, и не научится узнавать паттерны в новых данных. Нам нужно найти золотую середину. Понимание того как долго тренировать модель, сколько эпох выбрать - это очень полезный навык, которому мы сейчас научимся.
Чтобы избежать переобучения, наиболее оптимальным решением будет использовать больше тренировочных данных. Модели, обученные на большем количестве данных, естественным образом обобщают их лучше. Когда увеличить точность более не представляется возможным, то тогда мы начинаем использовать методы *регуляризации*. Они ограничивают количество и тип информации, которые модель может хранить в себе. Если нейросеть может запомнить только небольшое количество паттернов, то тогда процесс оптимизации заставит ее сфокусироваться на самых важных, наиболее заметных шаблонах, которые будут иметь более высокий шанс обобщения.
В этом уроке мы познакомимся с двумя распространенными методами регуляризации: *регуляризация весов* и *исключение* (*dropout*). Мы используем их чтобы улучшить показатели нашей модели из урока по классификации обзоров фильмов из IMDB.
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## Загружаем датасет IMDB
Вместо того, чтобы использовать *embedding* слой, как мы делали это в предыдущем уроке, здесь мы попробуем *multi-hot-encoding*. Наша модель быстро начнет переобучаться на тренировочных данных. Мы посмотрим как это произойдет и рассмотрим способы предотвращения этого.
Использование multi-hot-encoding на нашем массиве конвертирует его в векторы 0 и 1. Говоря конкретнее, это означает что например последовательность `[3, 5]` будет конвертирована в 10,000-размерный вектор, который будет состоять полностью из нулей за исключением 3 и 5, которые будут представлены в виде единиц.
```
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
def multi_hot_sequences(sequences, dimension):
    """Multi-hot encode integer index sequences as a 2-D 0/1 matrix.

    Each input sequence becomes one row of length ``dimension`` that is
    zero everywhere except at the listed word indices, which are set to 1.
    """
    # Start from an all-zero matrix with one row per sequence.
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing flips every listed position of this row to 1.
        encoded[row, word_indices] = 1.0
    return encoded
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
```
Давай посмотрим на один из получившихся multi-hot векторов. Номера слов были отсортированы по частоте, и вполне ожидаемо, что многие значения единицы будут около нуля. Проверим это на графике:
```
plt.plot(train_data[0])
```
## Продемонстрируем переобучение
Самый простой способ предотвратить переобучение, это уменьшить размер модели, или количество обучаемых параметров, которые определяются количеством слоев и блоков на каждый слой. В глубоком обучении количество обучаемых параметров часто называют *емкостью модели*. Понятно, что модель с большим количество параметров будет иметь больший запас для обучения, и следовательно легче сможет выучить взаимосвязи между тренировочными образцами данных и целевыми проверочными. Обучение же без возможности обобщения окажется бесполезным, особенно если мы попытаемся получить предсказания на новых, ранее не виденных данных.
Всегда помни об этом: модели глубокого обучения всегда хорошо справляются с подстраиванием под тренировочные данные, но наша конечная цель - обучение обощению.
С другой стороны, если нейросеть имеет ограниченные ресурсы для запоминания шаблонов, то тогда она не сможет так же легко находить паттерны в данных. Чтобы сократить потери, такая модель будет вынуждена обучаться сжатым представлениям, которые имеют больше предсказательной силы. В то же самое время, если мы сделаем нашу модель слишком маленькой, тогда ей будет трудно подстроиться под тренировочный сет данных. Всегда нужно искать баланс между *слишком большой емкостью* и *недостаточной емкостью*.
К сожалению, не существует магической формулы, чтобы определить правильный размер или архитектуру модели, говоря о количестве слоев или размере каждого слоя. Тебе необходимо попробовать использовать разные архитектуры модели, прежде чем найти подходящую.
Чтобы найти подходящий размер модели, лучше начать с относительно небольших слоев и параметров, затем начать увеличивать размер слоев или добавлять новые до тех пор, пока показатели не начнут ухудшаться на проверочных данных. Давай попробуем разобраться на примере нашей сети для классификации обзоров.
Для начала мы построим простую модель используя только слои ```Dense``` в качестве основы, а затем сделаем маленькую и большую версию этой модели для сравнения.
### Строим основу для модели
```
baseline_model = keras.Sequential([
# Параметр `input_shape` нужен только для того, чтобы заработал `.summary`
keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
baseline_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
baseline_history = baseline_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### Создаем малый вариант
Давай построим модель с меньшим количеством скрытых блоков и сравним ее с первой моделью:
```
smaller_model = keras.Sequential([
keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(4, activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
smaller_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
smaller_model.summary()
```
И обучим модель используя те же данные:
```
smaller_history = smaller_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### Создаем большую модель
В качестве упражнения ты можешь создать модель даже еще больше, и посмотреть как быстро она начнет переобучаться. Затем протестируем эту модель, которая будет иметь гораздо бóльшую емкость, чем требуется для решения нашей задачи:
```
bigger_model = keras.models.Sequential([
keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(512, activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
bigger_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
bigger_model.summary()
```
И опять потренируем уже новую модель используя те же данные:
```
bigger_history = bigger_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
### Построим графики потерь
<!--TODO(markdaoust): This should be a one-liner with tensorboard -->
Непрерывные линии показывают потери во время обучения, а прерывистые - во время проверки (помни - чем меньше потери на проверочных данных, тем точнее модель). В нашем случае самая маленькая модель начинает переобучаться позже, чем основная (после 6 эпох вместо 4) и ее показатели ухудшаются гораздо медленее после переобучения.
```
def plot_history(histories, key='binary_crossentropy'):
    """Plot training vs. validation curves for several Keras history objects.

    ``histories`` is a list of (name, History) pairs; ``key`` selects which
    recorded metric to draw. Validation curves are dashed, training curves
    are solid and reuse the matching validation color.
    """
    plt.figure(figsize=(16, 10))
    for label, run in histories:
        # Dashed line for the validation metric; keep the handle so the
        # training line can reuse the same color.
        val_line = plt.plot(run.epoch, run.history['val_' + key],
                            '--', label=label.title() + ' Val')
        plt.plot(run.epoch, run.history[key],
                 color=val_line[0].get_color(),
                 label=label.title() + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel(key.replace('_', ' ').title())
    plt.legend()
    # Uses the last iterated history, as in the original implementation.
    plt.xlim([0, max(run.epoch)])
plot_history([('baseline', baseline_history),
('smaller', smaller_history),
('bigger', bigger_history)])
```
Обрати внимание, что большая сеть начинает переобучаться почти сразу же после первой эпохи, и ее метрики ухудшаются гораздо быстрее. Чем больше емкость модели, тем легче она сможет вместить тренировочный сет данных, что повлечет за собой низкие потери при обучении. Но в таком случае она будет более чувствительна к переобучению: разница в потерях между обучением и проверкой будет очень велика.
## Как решить проблему переобучения?
### Добавить регуляризацию весов
Тебе может быть знаком принцип *бритвы Оккама*: если есть 2 толкования явления, то правильным является самое "простое" - то, которое содержит меньше всего предположений. Этот принцип также применим к моделям, обучаемым при помощи нейронных сетей: для одной и той же сети и данных существует несколько весовых значений, или моделей, которые могут быть обучены. Простые модели переобучаются гораздо реже, чем сложные.
В этом контексте "простая модель" - та, в которой распределение значений параметров имеет меньшую энтропию. Другими словами, модель с меньшим количеством параметров, которую мы строили выше, является простой. Таким образом, для предотвращения переобучения часто используется ограничение сложности сети путем уменьшения ее коэффициентов, что делает распределение более равномерным или *регулярным*. Этот метод называется *регуляризация весов*: к функции потерь нашей сети мы добавляем штраф (или *cost*, стоимость) за использование больших весов.
Штраф имеет 2 вида:
* Регуляризация L1 - штраф прямо пропорционален абсолютному значению коэффицентов весов (сокращенно мы называем его "норма L1")
* Регуляризация L2 - штраф добавляется пропорционально квадрату значения коэффицента весов. Норму L2 также называют *угасанием весов*. Это два одинаковых названия для одной и той же математической формулы
Чтобы осуществить регуляризацию в `tf.keras` мы добавим новый регулятор в блок со слоями как аргумент. Давай попробуем добавить L2 и посмотреть что получится:
```
l2_model = keras.models.Sequential([
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation=tf.nn.relu),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
l2_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
l2_model_history = l2_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
```
Значение ```l2(0.001)``` означает, что каждый коэффицент матрицы весов слоя будет добавлять ```0.001 * weight_coefficient_value**2``` к общей потери сети. Обрати внимание, что штраф добавляется только во время обучения, потери во время этой стадии будут гораздо выше, чем во время проверки.
Вот так выглядит влияние регуляризации L2:
```
plot_history([('Базовая модель', baseline_history),
('Регуляризация L2', l2_model_history)])
```
Как видишь, прошедшая L2 регуляризцию модель стала более устойчива к переобучению, чем наша изначальная, несмотря на то, что обе модели имели равное количество параметров.
### Добавить исключение Dropout
Метод исключения (или выпадения) *Dropout* - один из самых эффективных и часто используемых приемов регуляризации нейронных сетей. Он был разработан Джеффом Хинтоном совместно с его студентами в Университете Торонто. Применяемый к слою Dropout состоит из случайно выпадающих (или равных нулю) признаков этого слоя.
Допустим, что наш слой обычно возвращает вектор [0.2, 0.5, 1.3, 0.8, 1.1] на входной образец данных. После применения Dropout этот вектор будет случайным образом приравнивать к нулю какие-то его значения, например так - [0, 0.5, 1.3, 0, 1.1].
Ту часть признаков, которые "выпадут" или обнулятся, называют *коэффициентом исключения dropout*. Обычно его устанавливают между 0.2 и 0.5. Во время проверки dropout не используется, и вместо этого все выходные значения уменьшаются на соответствующий коэффициент (скажем, 0.5). Это поможет нам сбалансировать тот факт, что во время проверки было активировано больше блоков, чем во время обучения.
В `tf.keras` ты можешь использовать метод исключения в своей сети при помощи слоя Dropout, который применяется к выводу данных из предшествующего слоя.
Давай добавим два слоя Dropout в нашу сеть на данных IMDB и посмотрим насколько хорошо она справится с переобучением:
```
dpt_model = keras.models.Sequential([
keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
keras.layers.Dropout(0.5),
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dropout(0.5),
keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
dpt_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
dpt_model_history = dpt_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
plot_history([('Базовая модель', baseline_history),
('Метод Dropout', dpt_model_history)])
```
Метод Dropout имеет явные преимущества по сравнению с нашей изначальной, базовой моделью.
Подведем итоги - вот самые основные способы предотвращения переобучения нейросетей:
* Использовать больше данных для обучения
* Уменьшить емкость сети
* Использовать регуляризацию весов
* Или dropout
Также существуют еще два важных подхода, которые не были продемонстрированы в этом уроке: увеличение или *аугментация данных* и *нормализация батча*.
| github_jupyter |
# Named Entity Recognition on PILOT files using classic SpaCy pipeline
MiMoText pilot files are:
* Senac_Emigre
* Maistre_Voyage
* Sade_Aline
* Sade_Justine
* Bernadin_Paul
* Laclos_Liaisons
* Retif_Paysanne
* Retif_Paysan
* Mercier_An
* Retif_AntiJustine
* Rousseau_Julie
* Voltaire_Candide
For full list of metadata and MiMoText IDs see https://docs.google.com/spreadsheets/d/10HrWlxkAuOiMxgyDa4K8cA7syvbFJGAW2kgbonyyDvQ/edit#gid=0
The pretrained statistical models for French is multi-task CNN trained on UD French Sequoia and WikiNER. Assigns context-specific token vectors, POS tags, dependency parse and named entities.
When you call `nlp` on a text, spaCy first tokenizes the text to produce a `Doc` object. The `Doc` is then processed in several different steps – this is also referred to as the processing pipeline. The pipeline used by the default models consists of a tagger, a parser and an entity recognizer. Each pipeline component returns the processed `Doc`, which is then passed on to the next component.
```
import spacy
import re
import glob
import nltk
import sklearn
from spacy import pipeline
from spacy import morphology
from spacy import displacy
from collections import Counter
import fr_core_news_lg
import requests
sklearn.feature_extraction.text.CountVectorizer
# loading of french language model
nlp = fr_core_news_lg.load()
# printing out a sorted list of the ten most common LOC entities within the text
voltaire_candide = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Voltaire_Candide.txt')
voltaire_candide = nlp(voltaire_candide.text)
listOfPER_voltaire_candide = [ent for ent in voltaire_candide.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfPER_voltaire_candide]).most_common(10)
# printing out a sorted list of the ten most common LOC entities within the text
senac_emigre = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Senac_Emigre.txt')
senac_emigre = nlp(senac_emigre.text)
Counter([ent.text.strip() for ent in [ent for ent in senac_emigre.ents if ent.label_ == 'LOC']]).most_common(10)
maistre_voyage = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Maistre_Voyage.txt')
maistre_voyage = nlp(maistre_voyage.text)
listOfLOC_maistre_voyage = [ent for ent in maistre_voyage.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_maistre_voyage]).most_common(10)
laclos_liaisons = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Laclos_Liaisons.txt')
laclos_liaisons = nlp(laclos_liaisons.text)
listOfLOC_laclos_liaisons = [ent for ent in laclos_liaisons.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_laclos_liaisons]).most_common(10)
#Increasing the max_length for longer novels
nlp.max_length = 1700000
rousseau_julie = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Rousseau_Julie.txt')
rousseau_julie = nlp(rousseau_julie.text)
listOfLOC_rousseau_julie = [ent for ent in rousseau_julie.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_rousseau_julie]).most_common(10)
retif_paysanne = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Retif_Paysanne.txt')
retif_paysanne= nlp(retif_paysanne.text)
listOfLOC_retif_paysanne = [ent for ent in retif_paysanne.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_retif_paysanne]).most_common(10)
#-->> Check: Why are there unusual LOC entitites ini retif_paysanne? Displacy renders the whole text with named entities (grey = PERS , orange = LOC, blue = ORG)
displacy.render(retif_paysanne,style = 'ent', jupyter=True)
retif_antijustine = requests.get('https://raw.githubusercontent.com/MiMoText/roman18/master/plain/files/Retif_AntiJustine.txt')
retif_antijustine= nlp(retif_antijustine.text)
listOfLOC_retif_antijustine = [ent for ent in retif_antijustine.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_retif_antijustine]).most_common(10)
sade_justine = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Sade_Justine.txt')
sade_justine = nlp(sade_justine.text)
listOfLOC_sade_justine = [ent for ent in sade_justine.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_sade_justine]).most_common(10)
sade_aline = requests.get('https://raw.githubusercontent.com/MiMoText/roman-dixhuit/master/plain/files/Sade_Aline.txt')
sade_aline = nlp(sade_aline.text)
listOfLOC_sade_aline = [ent for ent in sade_aline.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_sade_aline]).most_common(10)
bernadin_paul = requests.get('https://raw.githubusercontent.com/MiMoText/roman18/master/plain/files/Bernardin_Paul.txt')
bernadin_paul = nlp(bernadin_paul.text)
listOfLOC_bernadin_paul = [ent for ent in bernadin_paul .ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_bernadin_paul ]).most_common(10)
mercier_an = requests.get('https://raw.githubusercontent.com/MiMoText/roman18/master/plain/files/Mercier_An.txt')
mercier_an = nlp(mercier_an.text)
listOfLOC_mercier_an = [ent for ent in mercier_an.ents if ent.label_ == 'LOC']
Counter([ent.text.strip() for ent in listOfLOC_mercier_an]).most_common(10)
```
# PER entities
Printing out a sorted list of the ten most common PER entities within the french novels (pilote corpus MiMoText)
```
Counter([ent.text.strip() for ent in [ent for ent in voltaire_candide.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in senac_emigre.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in maistre_voyage.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in laclos_liaisons.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in rousseau_julie.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in retif_paysanne.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in retif_antijustine.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in sade_justine.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in sade_aline.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in bernadin_paul.ents if ent.label_ == 'PER']]).most_common(10)
Counter([ent.text.strip() for ent in [ent for ent in mercier_an.ents if ent.label_ == 'PER']]).most_common(10)
# Computing Similarity with word vectors (SpaCy)
print('voltaire_candide et laclos_liaisons ',voltaire_candide.similarity(laclos_liaisons))
print('voltaire_candide et senac_emigre',voltaire_candide.similarity(senac_emigre))
print('voltaire_candide et sade aline',voltaire_candide.similarity(sade_aline))
print('voltaire_candide et maistre_voyage',voltaire_candide.similarity(maistre_voyage))
```
| github_jupyter |
# Make spectral libraries
```
import sys, os
sys.path.append('/Users/simon/git/vimms')
sys.path.insert(0,'/Users/simon/git/mass-spec-utils/')
from vimms.Common import save_obj
from tqdm import tqdm
%load_ext autoreload
%autoreload 2
library_cache = '/Users/simon/clms_er/library_cache'
```
## Massbank
```
from mass_spec_utils.library_matching.spec_libraries import MassBankLibrary
```
Path to the local version of the massbank repo
```
massbank_data_path = '/Users/simon/git/MassBank-Data/' # final slash is important!
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='POSITIVE')
save_obj(mb, os.path.join(library_cache, 'massbank_pos.p'))
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='NEGATIVE')
save_obj(mb, os.path.join(library_cache, 'massbank_neg.p'))
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='all')
save_obj(mb, os.path.join(library_cache, 'massbank_all.p'))
```
## GNPS
Using Florian's file, because it has inchikeys
```
json_file = '/Users/simon/Downloads/gnps_positive_ionmode_cleaned_by_matchms_and_lookups.json'
import json
with open(json_file,'r') as f:
payload = json.loads(f.read())
from mass_spec_utils.library_matching.spectrum import SpectralRecord
neg_intensities = []
def json_to_spectrum(json_dat):
    """Convert one GNPS JSON entry into a SpectralRecord.

    Returns None — and appends the spectrum id to the module-level
    ``neg_intensities`` list — when the entry contains a negative peak
    intensity; otherwise returns a new SpectralRecord.
    """
    precursor_mz = json_dat['precursor_mz']
    # Provenance: path of the JSON library file (module-level global).
    original_file = json_file
    spectrum_id = json_dat['spectrum_id']
    # Extracted but not used below — presumably kept for debugging.
    inchikey = json_dat['inchikey_smiles']
    # Peak list of (mz, intensity) pairs lives under the 'peaks_json' key.
    peaks = json_dat['peaks_json']
    metadata = {}
    for k,v in json_dat.items():
        # NOTE(review): this excludes key 'peaks', but the actual key is
        # 'peaks_json', so the raw peak list also lands in metadata —
        # confirm whether that is intended.
        if not k == 'peaks':
            metadata[k] = v
    # Unzip to inspect intensities; the mz values are unused here.
    mz,i = zip(*peaks)
    if min(i) < 0:
        # Corrupt entry: remember its id for later inspection and skip it.
        neg_intensities.append(spectrum_id)
        return None
    else:
        new_spectrum = SpectralRecord(precursor_mz, peaks, metadata, original_file, spectrum_id)
        return new_spectrum
records = {}
for jd in tqdm(payload):
new_spec = json_to_spectrum(jd)
if new_spec is not None:
records[new_spec.spectrum_id] = new_spec
def filter_min_peaks(spectrum, min_n_peaks=10):
    """Return the spectrum unchanged, or None if it has too few peaks."""
    # Spectra below the threshold are dropped by signalling None.
    return spectrum if len(spectrum.peaks) >= min_n_peaks else None
def filter_rel_intensity(spectrum, min_rel=0.01, max_rel=1.):
    """Keep only peaks whose intensity relative to the maximum lies in
    [min_rel, max_rel]; mutates ``spectrum.peaks`` in place and returns
    the spectrum."""
    peaks = spectrum.peaks
    # Unzip once to find the largest intensity (mz values are unused).
    _, intensities = zip(*peaks)
    top = max(intensities)
    spectrum.peaks = [p for p in peaks if min_rel <= p[1] / top <= max_rel]
    return spectrum
# Apply both filters to every spectrum: drop spectra with too few peaks,
# then trim peaks with very low relative intensity from the survivors.
new_records = {}
for sid in tqdm(records.keys()):
    spec = records[sid]
    ss = filter_min_peaks(spec)
    if ss is not None:
        # This first assignment is overwritten two lines below; it only
        # matters as a guard pattern with the else/continue branch.
        new_records[sid] = ss
    else:
        continue
    ss = filter_rel_intensity(ss)
    new_records[sid] = ss
for sid, ss in new_records.items():
ss.metadata['inchikey'] = ss.metadata['inchikey_smiles']
from mass_spec_utils.library_matching.spec_libraries import SpectralLibrary
sl = SpectralLibrary()
sl.records = new_records
sl.sorted_record_list = sl._dic2list()
save_obj(sl, os.path.join(library_cache,'gnps.p'))
```
| github_jupyter |
# How to handle WelDX files
In this notebook we will demonstrate how to create, read, and update ASDF files created by WeldX. All the needed functionality is contained in a single class named `WeldxFile`. We are going to show different modes of operation, like working with physical files on your hard drive, and in-memory files, both read-only and read-write mode.
## Imports
The WeldxFile class is being imported from the top-level of the weldx package.
```
from datetime import datetime
import numpy as np
from weldx import WeldxFile
```
## Basic operations
Now we create our first file, by invoking the `WeldxFile` constructor without any additional arguments. By doing so, we create an in-memory file. This means, that your changes will be temporary until you write it to an actual file on your harddrive. The `file_handle` attribute will point to the actual underlying file. In this case it is the in-memory file or buffer as shown below.
```
file = WeldxFile()
file.file_handle
```
Next we assign some dictionary like data to the file, by storing it some attribute name enclosed by square brackets.
Then we look at the representation of the file header or contents. This will depend on the execution environment.
In JupyterLab you will see an interactive tree like structure, which can be expanded and searched.
The root of the tree is denoted as "root" followed by children created by the ASDF library "asdf_library" and "history". We attached the additional child "some_data" with our assignment.
```
data = {"data_sets": {"first": np.random.random(100), "time": datetime.now()}}
file["some_data"] = data
file
```
Note, that here we are using some very common types, namely an NumPy array and a timestamp. For weldx specialized types like the coordinates system manager, (welding) measurements etc., the weldx package provides ASDF extensions to handle those types automatically during loading and saving ASDF data. You do not need to worry about them. If you try to save types, which cannot be handled by ASDF, you will trigger an error.
We could also have created the same structure in one step:
```
file = WeldxFile(tree=data, mode="rw")
file
```
You might have noticed, that we got a warning about the in-memory operation during showing the file in Jupyter.
Now we have passed the additional argument mode="rw", which indicates, that we want to perform write operations just in memory,
or alternatively to the passed physical file. So this warning went away.
We can use all dictionary operations on the data we like, e.g. update, assign, and delete items.
```
file["data_sets"]["second"] = {"data": np.random.random(100), "time": datetime.now()}
# delete the first data set again:
del file["data_sets"]["first"]
file
```
We can also iterate over all keys as usual. You can also have a look at the documentation of the builtin type `dict` for a complete overview of its features.
```
for key, value in file.items():
print(key, value)
```
### Access to data by attributes
The access by key names can be tedious, when deeply nested dictionaries are involved. We provide a handling via attributes like this
```
accessible_by_attribute = file.as_attr()
accessible_by_attribute.data_sets.second
```
## Writing files to disk
In order to make your changes persistent, we are going to save the memory-backed file to disk by invoking `WeldxFile.write_to`.
```
file.write_to("example.asdf")
```
This newly created file can be opened up again, in read-write mode like by passing the appropriate arguments.
```
example = WeldxFile("example.asdf", mode="rw")
example["updated"] = True
example.close()
```
Note, that we closed the file here explicitly. Before closing, we wanted to write a simple item to tree. But lets see what happens, if we open the file once again.
```
example = WeldxFile("example.asdf", mode="rw")
display(example)
example.close()
```
As you see the `updated` state has been written, because we closed the file properly. If we omit closing the file,
our changes would be lost when the object runs out of scope or Python terminates.
## Handling updates within a context manager
To ensure you will not forget to update your file after making changes,
we are able to enclose our file-changing operations within a context manager.
This ensures that all operations done in this context (the `with` block) are being written to the file, once the context is left.
Note that the underlying file is also closed after the context ends. This is useful, when you have to update lots of files, as there is a limited amount of file handles an operating system can deal with.
```
with WeldxFile("example.asdf", mode="rw") as example:
example["updated"] = True
fh = example.file_handle
# now the context ends, and the file is being saved to disk again.
# lets check the file handle has been closed, after the context ended.
assert fh.closed
```
Let us inspect the file once again, to see whether our `updated` item has been correctly written.
```
WeldxFile("example.asdf")
```
In case an error got triggered (e.g. an exception has been raised) inside the context, the underlying file is still updated. You could prevent this behavior, by passing `sync=False` during file construction.
```
try:
with WeldxFile("example.asdf", mode="rw") as file:
file["updated"] = False
raise Exception("oh no")
except Exception as e:
print("expected error:", e)
WeldxFile("example.asdf")
```
## Keeping a log of changes when manipulating a file
It can become quite handy to know what has been done to file in the past. Weldx files provide a history log, in which arbitrary strings can be stored with time stamps and used software. We quickly run you through the process of adding history entries to your file.
```
filename_hist = "example_history.asdf"
with WeldxFile(filename_hist, mode="rw") as file:
file["some"] = "changes"
file.add_history_entry("added some changes")
WeldxFile(filename_hist).history
```
When you want to describe custom software — let's say a library or tool
used to generate or modify the data in the file — we can pass it into the creation of our WeldxFile.
```
software = dict(
name="my_tool", version="1.0", homepage="https://my_tool.org", author="the crowd"
)
with WeldxFile(filename_hist, mode="rw", software_history_entry=software) as file:
file["some"] = "changes"
file.add_history_entry("added more changes")
```
Let's now inspect how we wrote history.
```
WeldxFile(filename_hist).history[-1]
```
The entries key is a list of all log entries, where new entries are appended to. We have proper time stamps indicating when the change happened, the actual log entry, and optionally a custom software used to make the change.
## Handling of custom schemas
An important aspect of WelDX or ASDF files is, that you can validate them to comply with a defined schema. A schema defines required and optional attributes a tree structure has to provide to pass the schema validation. Further the types of these attributes can be defined, e.g. the data attribute should be a NumPy array, or a timestamp should be of type `pandas.Timestamp`.
There are several schemas provided by WelDX, which can be used by passing them to the `custom_schema` argument. It is expected to be a path-like type, so a string (`str`) or `pathlib.Path` is accepted. The provided utility function `get_schema_path` returns the path to named schema. So its output can directly be used in WeldxFile(schema=...)
```
from weldx.asdf.util import get_schema_path
# Resolve the bundled schema name to an on-disk path usable as custom_schema.
schema = get_schema_path("single_pass_weld-0.1.0")
schema
```
This schema defines a complete experimental setup with measurement data; for example, it requires the following attributes to be defined in our tree:
- workpiece
- TCP
- welding_current
- welding_voltage
- measurements
- equipment
We use a testing function to provide this data now, and validate it against the schema by passing the `custom_schema` during WeldxFile creation.
Here we just have a look at the process parameters sub-dictionary.
```
from weldx.asdf.cli.welding_schema import single_pass_weld_example
# Build example data matching the schema (out_file=None keeps it in memory).
_, single_pass_weld_data = single_pass_weld_example(out_file=None)
display(single_pass_weld_data["process"])
```
That is a lot of data, containing complex data structures and objects describing the whole experiment including measurement data.
We can now create new `WeldxFile` and validate the data against the schema.
```
WeldxFile(tree=single_pass_weld_data, custom_schema=schema, mode="rw")
```
But what would happen if we forget an important attribute? Let's have a closer look...
```
# simulate we forgot something important, so we delete the workpiece:
del single_pass_weld_data["workpiece"]
# now create the file again, and see what happens:
try:
    WeldxFile(tree=single_pass_weld_data, custom_schema=schema, mode="rw")
except Exception as e:
    # Expected: a ValidationError naming the missing attribute.
    display(e)
```
We receive a ValidationError from the ASDF library, which tells us exactly what the missing information is. The same will happen, if we accidentally pass the wrong type.
```
# simulate a wrong type by changing it to a NumPy array.
single_pass_weld_data["welding_current"] = np.zeros(10)  # assumes numpy is imported as np — TODO confirm
# now create the file again, and see what happens:
try:
    WeldxFile(tree=single_pass_weld_data, custom_schema=schema, mode="rw")
except Exception as e:
    # Expected: a ValidationError about the mismatched tag/type.
    display(e)
```
Here we see that a `signal` tag is expected, but an `asdf/core/ndarray-1.0.0` was received.
The ASDF library assigns tags to certain types to handle their storage in the file format.
As shown, the `signal` tag is contained in `weldx/measurement` container, provided by `weldx.bam.de`. The tags and schemas also provide a version number, so future updates in the software become manageable.
Custom schemas can be used to define own protocols or standards describing your data.
## Summary
In this tutorial we have encountered how to easily open, inspect, manipulate, and update ASDF files created by WelDX. We've learned that these files can store a variety of different data types and structures.
Discussed features:
* Opening in read/write mode `WeldxFile(mode="rw")`.
* Creating files in memory (passing no file name to `WeldxFile()` constructor).
* Writing to disk (`WeldxFile.write_to`).
* Keeping log of changes (`WeldxFile.history`, `WeldxFile.add_history_entry`).
* Validation against a schema `WeldxFile(custom_schema="/path/my_schema.yaml")`
| github_jupyter |
```
# Car object detection (one box per image): environment setup, data load,
# and a quick crop/box visualization sanity check for a single sample.
import os
import json,cv2
import pandas as pd
import numpy as np
import torch,torchvision
import wandb
from torch.nn import *
from torch.optim import *
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from torchvision.models import *
import wandb  # NOTE(review): duplicate import (already imported above)
device = 'cuda'  # assumes a CUDA GPU is available — TODO confirm
PROJECT_NAME = 'Car-Object-Detection-V1-Learning-Object-Detection'
# Record library versions for reproducibility.
torch.__version__,torchvision.__version__,wandb.__version__,json.__version__,pd.__version__,np.__version__
data = pd.read_csv('./data.csv').sample(frac=1)  # shuffle all rows
data
# Sanity check on one image: crop the labelled box and draw it.
img = cv2.imread('./data/vid_4_12300.jpg')
xmin,ymin,xmax,ymax = 386,185,554,230  # hand-picked box for this image
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
crop = img[y:y+h,x:x+w]  # rows are y, columns are x in OpenCV images
plt.imshow(crop)
cv2.imwrite('./crop.png',crop)
# NOTE(review): cv2.rectangle draws in place, so img itself now carries the box.
plt.imshow(cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2))
cv2.imwrite('./box.png',cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2))
def load_data():
    """Load images and box labels from ./data and split into train/test tensors.

    Returns:
        X, y: full lists of images (HxWxC floats in [0, 1]) and
            [xmin, ymin, xmax, ymax] targets.
        X_train, X_test, y_train, y_test: torch tensors on `device`;
            images are (N, 3, 56, 56) floats, targets are (N, 4) floats.
        new_data: the raw [image, box] pairs.
    """
    new_data = []
    # BUGFIX: the original loop header had a syntax error
    # (`range(len(data)):)`), and images were resized to (112, 112) while
    # the tensors below were `view`-ed as 56x56 — that reinterpretation
    # scrambled the pixels and produced 4x as many "images" as labels.
    for idx in tqdm(range(len(data))):
        info = data.iloc[idx]
        new_data.append([
            cv2.resize(cv2.imread(f'./data/{info["image"]}'), (56, 56)) / 255.0,
            [info['xmin'], info['ymin'], info['xmax'], info['ymax']]
        ])
    X = [d[0] for d in new_data]
    y = [d[1] for d in new_data]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=False)
    # HWC -> CHW via transpose (a plain `view` would interleave channels).
    X_train = torch.from_numpy(np.array(X_train).transpose(0, 3, 1, 2)).to(device).float()
    y_train = torch.from_numpy(np.array(y_train)).to(device).float()
    X_test = torch.from_numpy(np.array(X_test).transpose(0, 3, 1, 2)).to(device).float()
    y_test = torch.from_numpy(np.array(y_test)).to(device).float()
    return X, y, X_train, X_test, y_train, y_test, new_data
X,y,X_train,X_test,y_train,y_test,new_data = load_data()
# Persist the split tensors to disk.
# NOTE(review): the .pt and .pth saves below write the same tensors twice
# under two extensions — one set is redundant.
torch.save(X_train,'X_train.pt')
torch.save(y_train,'y_train.pt')
torch.save(X_test,'X_test.pt')
torch.save(y_test,'y_test.pt')
torch.save(X_train,'X_train.pth')
torch.save(y_train,'y_train.pth')
torch.save(X_test,'X_test.pth')
torch.save(y_test,'y_test.pth')
def get_loss(model, X, y, criterion):
    """Evaluate `criterion` on the model's predictions for X against y.

    Returns the loss as a plain Python float.
    """
    predictions = model(X)
    return criterion(predictions, y).item()
def get_accuracy(model, X, y):
    """Percentage of samples whose argmax prediction equals the argmax target.

    NOTE(review): argmax agreement is an odd metric for box regression,
    but the original behavior is preserved verbatim here.
    """
    pairs = list(zip(model(X), y))
    matches = sum(
        int(torch.argmax(pred)) == int(torch.argmax(target))
        for pred, target in pairs
    )
    return round(matches / len(pairs), 3) * 100
# Baseline: resnet18 with ImageNet weights, head swapped to regress 4 box coords.
model = resnet18(pretrained=True).to(device)
model.fc = Linear(512,4)  # new head starts on CPU; moved over by model.to(device) in the loop
criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
wandb.init(project=PROJECT_NAME,name='baseline-TL-True')
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size):
        try:
            X_batch = X_train[i:i+batch_size]
            y_batch = y_train[i:i+batch_size]
            model.to(device)
            preds = model(X_batch)
            loss = criterion(preds,y_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        except:
            # NOTE(review): bare except silently swallows every error
            # (real bugs, CUDA OOM, KeyboardInterrupt) — narrow or remove.
            pass
    model.eval()  # eval mode for the metric passes below
    torch.cuda.empty_cache()
    # Train loss logged as the mean of full-train-set loss and last-batch loss.
    wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
    torch.cuda.empty_cache()
    wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})
    torch.cuda.empty_cache()
    model.train()  # back to train mode for the next epoch
wandb.finish()
y_batch.shape,preds.shape  # quick shape sanity check
# Same training setup, but with a randomly initialized resnet18 (no transfer learning).
model = resnet18(pretrained=False).to(device)
model.fc = Linear(512,4)  # regress (xmin, ymin, xmax, ymax)
criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
wandb.init(project=PROJECT_NAME,name='baseline-TL-False')
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size):
        X_batch = X_train[i:i+batch_size]
        y_batch = y_train[i:i+batch_size]
        model.to(device)
        preds = model(X_batch)
        loss = criterion(preds,y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    model.eval()  # eval mode for the metric passes below
    torch.cuda.empty_cache()
    wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
    torch.cuda.empty_cache()
    wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})
    torch.cuda.empty_cache()
    model.train()
wandb.finish()
class Model(Module):
    """Small CNN that regresses the 4 bounding-box coordinates.

    Expects input of shape (N, 3, 56, 56); three conv/pool stages reduce the
    spatial size to 3x3 (56 -> 52 -> 26 -> 22 -> 11 -> 7 -> 3), matching the
    21*3*3 flatten below.
    """
    def __init__(self):
        super().__init__()
        self.max_pool2d = MaxPool2d((2, 2), (2, 2))
        self.activation = ReLU()
        self.conv1 = Conv2d(3, 7, (5, 5))
        self.conv2 = Conv2d(7, 14, (5, 5))
        self.conv2bn = BatchNorm2d(14)
        self.conv3 = Conv2d(14, 21, (5, 5))
        self.linear1 = Linear(21 * 3 * 3, 256)
        self.linear2 = Linear(256, 512)
        self.linear2bn = BatchNorm1d(512)
        self.linear3 = Linear(512, 256)
        # BUGFIX: the original used `len(labels)`, but `labels` is undefined
        # anywhere in this notebook (NameError at construction). The targets
        # are the 4 box coordinates, matching the resnet18 head above.
        self.output = Linear(256, 4)
    def forward(self, X):
        preds = self.max_pool2d(self.activation(self.conv1(X)))
        preds = self.max_pool2d(self.activation(self.conv2bn(self.conv2(preds))))
        preds = self.max_pool2d(self.activation(self.conv3(preds)))
        preds = preds.view(-1, 21 * 3 * 3)  # flatten conv features
        preds = self.activation(self.linear1(preds))
        preds = self.activation(self.linear2bn(self.linear2(preds)))
        preds = self.activation(self.linear3(preds))
        preds = self.output(preds)
        return preds
# Train the hand-rolled CNN defined above with the same loop and logging.
model = Model().to(device)
criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
wandb.init(project=PROJECT_NAME,name='baseline-CNN')
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size):
        X_batch = X_train[i:i+batch_size]
        y_batch = y_train[i:i+batch_size]
        model.to(device)
        preds = model(X_batch)
        loss = criterion(preds,y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    model.eval()  # eval mode for the metric passes below
    torch.cuda.empty_cache()
    wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
    torch.cuda.empty_cache()
    wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
    torch.cuda.empty_cache()
    wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})
    torch.cuda.empty_cache()
    model.train()
wandb.finish()
```
| github_jupyter |
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Particle Filters
```
#format the book
# Notebook boilerplate: interactive plotting backend plus the book's CSS style.
%matplotlib notebook
from __future__ import division, print_function
from book_format import load_style
load_style()
```
## Motivation
Here is our problem. We have moving objects that we want to track. Maybe the objects are fighter jets and missiles, or maybe we are tracking people playing cricket in a field. It doesn't really matter. Which of the filters that we have learned can handle this problem? Unfortunately, none of them are ideal. Let's think about the characteristics of this problem.
* **multimodal**: We want to track zero, one, or more than one object simultaneously.
* **occlusions**: One object can hide another, resulting in one measurement for multiple objects.
* **nonlinear behavior**: Aircraft are buffeted by winds, balls move in parabolas, and people collide into each other.
* **nonlinear measurements**: Radar gives us the distance to an object. Converting that to an (x,y,z) coordinate requires a square root, which is nonlinear.
* **non-Gaussian noise:** as objects move across a background the computer vision can mistake part of the background for the object.
* **continuous:** the object's position and velocity (i.e. the state space) can smoothly vary over time.
* **multivariate**: we want to track several attributes, such as position, velocity, turn rates, etc.
* **unknown process model**: we may not know the process model of the system
None of the filters we have learned work well with all of these constraints.
* **Discrete Bayes filter**: This has most of the attributes. It is multimodal, can handle nonlinear measurements, and can be extended to work with nonlinear behavior. However, it is discrete and univariate.
* **Kalman filter**: The Kalman filter produces optimal estimates for unimodal linear systems with Gaussian noise. None of these are true for our problem.
* **Unscented Kalman filter**: The UKF handles nonlinear, continuous, multivariate problems. However, it is not multimodal nor does it handle occlusions. It can handle noise that is modestly non-Gaussian, but does not do well with distributions that are very non-Gaussian or problems that are very nonlinear.
* **Extended Kalman filter**: The EKF has the same strengths and limitations as the UKF, except that is it even more sensitive to strong nonlinearities and non-Gaussian noise.
## Monte Carlo Sampling
In the UKF chapter I generated a plot similar to this to illustrate the effects of nonlinear systems on Gaussians:
```
from code.book_plots import interactive_plot
import code.pf_internal as pf_internal
# Scatter a Gaussian point cloud and push it through a nonlinear transform.
with interactive_plot():
    pf_internal.plot_monte_carlo_ukf()
```
The left plot shows 3,000 points normally distributed based on the Gaussian
$$\mu = \begin{bmatrix}0\\0\end{bmatrix},\, \, \, \Sigma = \begin{bmatrix}32&15\\15&40\end{bmatrix}$$
The right plots shows these points passed through this set of equations:
$$\begin{aligned}x&=x+y\\
y &= 0.1x^2 + y^2\end{aligned}$$
Using a finite number of randomly sampled points to compute a result is called a [*Monte Carlo*](https://en.wikipedia.org/wiki/Monte_Carlo_method) (MC) method. The idea is simple. Generate enough points to get a representative sample of the problem, run the points through the system you are modeling, and then compute the results on the transformed points.
In a nutshell this is what particle filtering does. The Bayesian filter algorithm we have been using throughout the book is applied to thousands of particles, where each particle represents a *possible* state for the system. We extract the estimated state from the thousands of particles using weighted statistics of the particles.
## Generic Particle Filter Algorithm
1. **Randomly generate a bunch of particles**
Particles can have position, heading, and/or whatever other state variable you need to estimate. Each has a weight (probability) indicating how likely it matches the actual state of the system. Initialize each with the same weight.
2. **Predict next state of the particles**
Move the particles based on how you predict the real system is behaving.
3. **Update**
Update the weighting of the particles based on the measurement. Particles that closely match the measurements are weighted higher than particles which don't match the measurements very well.
4. **Resample**
Discard highly improbable particle and replace them with copies of the more probable particles.
5. **Compute Estimate**
Optionally, compute weighted mean and covariance of the set of particles to get a state estimate.
This naive algorithm has practical difficulties which we will need to overcome, but this is the general idea. Let's see an example. I wrote a particle filter for the robot localization problem from the UKF and EKF chapters. The robot has steering and velocity control inputs. It has sensors that measures distance to visible landmarks. Both the sensors and control mechanism have noise in them, and we need to track the robot's position.
Here I run a particle filter and plotted the positions of the particles. The plot on the left is after one iteration, and on the right is after 10. The red 'X' shows the actual position of the robot, and the large circle is the computed weighted mean position.
```
# Particle positions after 1 and after 10 filter iterations.
with interactive_plot():
    pf_internal.show_two_pf_plots()
```
If you are viewing this in a browser, this animation shows the entire sequence:
<img src='animations/particle_filter_anim.gif'>
After the first iteration the particles are still largely randomly scattered around the map, but you can see that some have already collected near the robot's position. The computed mean is quite close to the robot's position. This is because each particle is weighted based on how closely it matches the measurement. The robot is near (1,1), so particles that are near (1, 1) will have a high weight because they closely match the measurements. Particles that are far from the robot will not match the measurements, and thus have a very low weight. The estimated position is computed as the weighted mean of positions of the particles. Particles near the robot contribute more to the computation so the estimate is quite accurate.
Several iterations later you can see that all the particles have clustered around the robot. This is due to the *resampling* step. Resampling discards particles that are very improbable (very low weight) and replaces them with particles with higher probability.
I haven't fully shown *why* this works nor fully explained the algorithms for particle weighting and resampling, but it should make intuitive sense. Make a bunch of random particles, move them so they 'kind of' follow the robot, weight them according to how well they match the measurements, only let the likely ones live. It seems like it should work, and it does.
## Probability distributions via Monte Carlo
Suppose we want to know the area under the curve $y= \mathrm{e}^{\sin(x)}$ in the interval [0, $\pi$]. The area is computed with the definite integral $\int_0^\pi \mathrm{e}^{\sin(x)}\, \mathrm{d}x$. As an exercise, go ahead and find the answer; I'll wait.
If you are wise you did not take that challenge; $\mathrm{e}^{\sin(x)}$ cannot be integrated analytically. The world is filled with equations which we cannot integrate. For example, consider calculating the luminosity of an object. An object reflects some of the light that strike it. Some of the reflected light bounces off of other objects and restrikes the original object, increasing the luminosity. This creates a *recursive integral*. Good luck with that one.
However, integrals are trivial to compute using a Monte Carlo technique. To find the area under a curve create a bounding box that contains the curve in the desired interval. Generate randomly positioned point within the box, and compute the ratio of points that fall under the curve vs the total number of points. For example, if 40% of the points are under the curve and the area of the bounding box is 1, then the area under the curve is approximately 0.4. As you tend towards infinite points you can achieve any arbitrary precision. In practice, a few thousand points will give you a fairly accurate result.
You can use this technique to numerically integrate a function of any arbitrary difficulty. This includes non-integrable and noncontinuous functions. This technique was invented by Stanley Ulam at Los Alamos National Laboratory to allow him to perform computations for nuclear reactions which were unsolvable on paper.
Let's compute $\pi$ by finding the area of a circle. We will define a circle with a radius of 1, and bound it in a square. The side of the square has length 2, so the area is 4. We generate a set of uniformly distributed random points within the box, and count how many fall inside the circle. The area of the circle is computed as the area of the box times the ratio of points inside the circle vs. the total number of points. Finally, we know that $A = \pi r^2$, so we compute $\pi = A / r^2$.
We start by creating the points.
```python
N = 20000
pts = uniform(-1, 1, (N, 2))
```
A point is inside a circle if its distance from the center of the circle is less than or equal to the radius. We compute the distance with `numpy.linalg.norm`, which computes the magnitude of a vector. Since vectors start at (0, 0) calling norm will compute the point's distance from the origin.
```python
dist = np.linalg.norm(pts, axis=1)
```
Next we compute which of these distances fit the criterion. This code returns a bool array that contains `True` if it meets the condition `dist <= 1`:
```python
in_circle = dist <= 1
```
All that is left is to count the points inside the circle, compute pi, and plot the results. I've put it all in one cell so you can experiment with alternative values for `N`, the number of points.
```
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform
N = 20000 # number of points
radius = 1
area = (2*radius)**2  # bounding square has side 2*radius
pts = uniform(-1, 1, (N, 2))
# distance from (0,0)
dist = np.linalg.norm(pts, axis=1)
in_circle = dist <= 1
pts_in_circle = np.count_nonzero(in_circle)
# hit ratio times box area approximates the circle's area (= pi * r^2)
pi = area * (pts_in_circle / N)
# plot results
with interactive_plot():
    plt.scatter(pts[in_circle,0], pts[in_circle,1],
                marker=',', edgecolor='k', s=1)
    plt.scatter(pts[~in_circle,0], pts[~in_circle,1],
                marker=',', edgecolor='r', s=1)
    plt.axis('equal')
print('mean pi(N={})= {:.4f}'.format(N, pi))
print('err pi(N={})= {:.4f}'.format(N, np.pi-pi))
```
This insight leads us to the realization that we can use Monte Carlo to compute the probability density of any probability distribution. For example, suppose we have this Gaussian:
```
from filterpy.stats import plot_gaussian_pdf
# Example Gaussian PDF for the integration discussion below.
with interactive_plot():
    plot_gaussian_pdf(mean=2, variance=3);
```
The probability density function (PDF) gives the probability that the random value falls between 2 values. For example, we may want to know the probability of x being between 0 and 2 in the graph above. This is a continuous function, so we need to take the integral to find the area under the curve, as the area is equal to the probability for that range of values to occur.
$$P[a \le X \le b] = \int_a^b f_X(x) \, dx$$
It is easy to compute this integral for a Gaussian. But real life is not so easy. For example, the plot below shows a probability distribution. There is no way to analytically describe an arbitrary curve, let alone integrate it.
```
# An arbitrary (non-analytic) probability distribution.
with interactive_plot():
    pf_internal.plot_random_pd()
```
We can use Monte Carlo methods to compute any integral. The PDF is computed with an integral, hence we can compute the PDF of this curve using Monte Carlo.
## The Particle Filter
All of this brings us to the particle filter. Consider tracking a robot or a car in an urban environment. For consistency I will use the robot localization problem from the EKF and UKF chapters. In this problem we tracked a robot that has a sensor which measures the range and bearing to known landmarks.
Particle filters are a family of algorithms. I'm presenting a specific form of a particle filter that is intuitive to grasp and relates to the problems we have studied in this book. This will leave a few of the steps seeming a bit 'magical' since I haven't offered a full explanation. That will follow later in the chapter.
Taking insight from the discussion in the previous section we start by creating several thousand *particles*. Each particle has a position that represents a possible belief of where the robot is in the scene, and perhaps a heading and velocity. Suppose that we have no knowledge of the location of the robot. We would want to scatter the particles uniformly over the entire scene. If you think of all of the particles representing a probability distribution, locations where there are more particles represent a higher belief, and locations with fewer particles represents a lower belief. If there was a large clump of particles near a specific location that would imply that we were more certain that the robot is there.
Each particle needs a weight - ideally the probability that it represents the true position of the robot. This probability is rarely computable, so we only require it be *proportional* to that probability, which is computable. At initialization we have no reason to favor one particle over another, so we assign a weight of $1/N$, for $N$ particles. We use $1/N$ so that the sum of all probabilities equals one.
The combination of particles and weights forms the *probability distribution* for our problem. Think back to the *Discrete Bayes* chapter. In that chapter we modeled positions in a hallway as discrete and uniformly spaced. This is very similar except the particles are randomly distributed in a continuous space rather than constrained to discrete locations. In this problem the robot can move on a plane of some arbitrary dimension, with the lower right corner at (0,0).
To track our robot we need to maintain states for x, y, and heading. We will store `N` particles in a `(N, 3)` shaped array. The three columns contain x, y, and heading, in that order.
If you are passively tracking something (no control input), then you would need to include velocity in the state and use that estimate to make the prediction. More dimensions requires exponentially more particles to form a good estimate, so we always try to minimize the number of random variables in the state.
This code creates a uniform and Gaussian distribution of particles over a region:
```
from numpy.random import uniform
def create_uniform_particles(x_range, y_range, hdg_range, N):
    """Scatter N particles uniformly over the given x, y, and heading ranges.

    Returns an (N, 3) array of [x, y, heading]; headings are wrapped
    into [0, 2*pi).
    """
    out = np.empty((N, 3))
    for col, (lo, hi) in enumerate((x_range, y_range, hdg_range)):
        out[:, col] = uniform(lo, hi, size=N)
    out[:, 2] %= 2 * np.pi
    return out
def create_gaussian_particles(mean, std, N):
    """Sample N particles from independent Gaussians per state component.

    `mean` and `std` are 3-vectors for (x, y, heading); headings are
    wrapped into [0, 2*pi). Returns an (N, 3) array.
    """
    particles = np.empty((N, 3))
    for col in range(3):
        particles[:, col] = mean[col] + (randn(N) * std[col])
    particles[:, 2] %= 2 * np.pi
    return particles
```
For example:
```
create_uniform_particles((0,1), (0,1), (0, np.pi*2), 4)
```
### Predict Step
The predict step in the Bayes algorithm uses the process model to update the belief in the system state. How would we do that with particles? Each particle represents a possible position for the robot. Suppose we send a command to the robot to move 0.1 meters while turning by 0.007 radians. We could move each particle by this amount. If we did that we would soon run into a problem. The robot's controls are not perfect so it will not move exactly as commanded. Therefore we need to add noise to the particle's movements to have a reasonable chance of capturing the actual movement of the robot. If you do not model the uncertainty in the system the particle filter will not correctly model the probability distribution of our belief in the robot's position.
```
def predict(particles, u, std, dt=1.):
    """Advance every particle by control u = (heading change, velocity),
    perturbed by Gaussian noise std = (heading std, velocity std).
    Mutates `particles` in place."""
    N = len(particles)
    # turn, then wrap the heading into [0, 2*pi)
    headings = particles[:, 2] + u[0] + (randn(N) * std[0])
    particles[:, 2] = headings % (2 * np.pi)
    # travel a noisy distance along the (updated) heading
    step = (u[1] * dt) + (randn(N) * std[1])
    particles[:, 0] += np.cos(particles[:, 2]) * step
    particles[:, 1] += np.sin(particles[:, 2]) * step
```
### Update Step
Next we get a set of measurements - one for each landmark currently in view. How should these measurements be used to alter our probability distribution as modeled by the particles?
Think back to the **Discrete Bayes** chapter. In that chapter we modeled positions in a hallway as discrete and uniformly spaced. We assigned a probability to each position which we called the *prior*. When a new measurement came in we multiplied the current probability of that position (the *prior*) by the *likelihood* that the measurement matched that location:
```python
def update(likelihood, prior):
posterior = prior * likelihood
return normalize(posterior)
```
which is an implementation of the equation
$$x = \| \mathcal L \bar x \|$$
which is a realization of Bayes theorem:
$$\begin{aligned}P(x \mid z) &= \frac{P(z \mid x)\, P(x)}{P(z)} \\
&= \frac{\mathtt{likelihood}\times \mathtt{prior}}{\mathtt{normalization}}\end{aligned}$$
We do the same with our particles. Each particle has a position and a weight which estimates how well it matches the measurement. Normalizing the weights so they sum to one turns them into a probability distribution. The particles that are closest to the robot will generally have a higher weight than those far from the robot.
```
def update(particles, weights, z, R, landmarks):
    """Reweight particles by the likelihood of the range measurements z
    (one per landmark, sensor std R). Mutates `weights` in place and
    normalizes them to sum to one."""
    weights.fill(1.)
    for landmark, measurement in zip(landmarks, z):
        ranges = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        weights *= scipy.stats.norm(ranges, R).pdf(measurement)
    weights += 1.e-300  # avoid round-off to zero
    weights /= sum(weights)  # normalize
```
In the literature this part of the algorithm is called *Sequential Importance Sampling*, or SIS. The equation for the weights is called the *importance density*. I will give these theoretical underpinnings in a following section. For now I hope that this makes intuitive sense. If we weight the particles according to how well they match the measurements they are probably a good sample for the probability distribution of the system after incorporating the measurements. Theory proves this is so. The weights are the *likelihood* in Bayes theorem. Different problems will need to tackle this step in slightly different ways but this is the general idea.
### Computing the State Estimate
In most applications you will want to know the estimated state after each update, but the filter consists of nothing but a collection of particles. Assuming that we are tracking one object (i.e. it is unimodal) we can compute the mean of the estimate as the sum of the weighted values of the particles.
$$ \mu = \frac{1}{N}\sum\limits_{i=1}^N w^ix^i$$
Here I adopt the notation $x^i$ to indicate the i$^{th}$ particle. A superscript is used because we often need to use subscripts to denote time steps the k$^{th}$ or k+1$^{th}$ particle, yielding the unwieldy $x^i_{k+1}$.
This function computes both the mean and variance of the particles:
```
def estimate(particles, weights):
    """Weighted mean and variance of the particles' (x, y) positions."""
    xy = particles[:, 0:2]
    mu = np.average(xy, weights=weights, axis=0)
    spread = np.average((xy - mu)**2, weights=weights, axis=0)
    return mu, spread
```
If we create a uniform distribution of points in a 1x1 square with equal weights we get a mean position very near the center of the square at (0.5, 0.5) and a small variance.
```
# Uniform particles with equal weights -> mean near (0.5, 0.5), small variance.
particles = create_uniform_particles((0,1), (0,1), (0, 5), 1000)
weights = np.array([.25]*1000)
estimate(particles, weights)
```
### Particle Resampling
The SIS algorithm suffers from the *degeneracy problem*. It starts with uniformly distributed particles with equal weights. There may only be a handful of particles near the robot. As the algorithm runs any particle that does not match the measurements will acquire an extremely low weight. Only the particles which are near the robot will have an appreciable weight. We could have 5,000 particles with only 3 contributing meaningfully to the state estimate! We say the filter has *degenerated*.
This problem is usually solved by some form of *resampling* of the particles. Particles with very small weights do not meaningfully describe the probability distribution of the robot.
The resampling algorithm discards particles with very low probability and replaces them with new particles with higher probability. It does that by duplicating particles with relatively high probability. The duplicates are slightly dispersed by the noise added in the predict step. This results in a set of points in which a large majority of the particles accurately represent the probability distribution.
There are many resampling algorithms. For now let's look at one of the simplest, *simple random resampling*, also called *multinomial resampling*. It samples from the current particle set $N$ times, making a new set of particles from the sample. The probability of selecting any given particle should be proportional to its weight.
We accomplish this with NumPy's `cumsum` function. `cumsum` computes the cumulative sum of an array. That is, element one is the sum of elements zero and one, element two is the sum of elements zero, one and two, etc. Then we generate random numbers in the range of 0.0 to 1.0 and do a binary search to find the weight that most closely matches that number:
```
def simple_resample(particles, weights):
    """Multinomial resampling: redraw N particles in place, each with
    probability proportional to its weight."""
    N = len(particles)
    cdf = np.cumsum(weights)
    cdf[-1] = 1.  # guard against floating-point shortfall at the top
    picks = np.searchsorted(cdf, random(N))
    # resample according to the drawn indexes
    particles[:] = particles[picks]
    weights[:] = weights[picks]
    weights /= np.sum(weights)  # normalize
```
We don't resample at every epoch. For example, if you received no new measurements you have not received any information from which the resample can benefit. We can determine when to resample by using something called the *effective N*, which approximately measures the number of particles which meaningfully contribute to the probability distribution. The equation for this is
$$\hat{N}_\text{eff} = \frac{1}{\sum w^2}$$
and we can implement this in Python with
```
def neff(weights):
    """Effective sample size, 1 / sum(w_i^2): roughly the number of
    particles meaningfully contributing to the probability estimate."""
    w = np.asarray(weights)
    return 1. / np.sum(w * w)
```
If $\hat{N}_\text{eff}$ falls below some threshold it is time to resample. A useful starting point is $N/2$, but this varies by problem. Note that $\hat{N}_\text{eff} = N$ can also occur when resampling has collapsed the particle set to duplicates of a single point — every weight is equal, so the statistic looks healthy even though the particles have no diversity. It may not be theoretically pure, but if that happens I create a new distribution of particles in the hopes of generating particles with more diversity. If this happens to you often, you may need to increase the number of particles, or otherwise adjust your filter. We will talk more of this later.
## SIR Filter - A Complete Example
There is more to learn, but we know enough to implement a full particle filter. We will implement the *Sampling Importance Resampling filter*, or SIR.
I need to introduce a more sophisticated resampling method than I gave above. FilterPy provides several resampling methods. I will describe them later. They take an array of weights and returns indexes to the particles that have been chosen for the resampling. We just need to write a function that performs the resampling from these indexes:
```
def resample_from_index(particles, weights, indexes):
    """Rebuild the particle set in place from the chosen `indexes`
    (duplicates allowed) and renormalize the associated weights."""
    chosen = indexes
    particles[:] = particles[chosen]
    weights[:] = weights[chosen]
    total = np.sum(weights)
    weights /= total
```
To implement the filter we need to create the particles and the landmarks. We then execute a loop, successively calling `predict`, `update`, resampling, and then computing the new state estimate with `estimate`.
```
from filterpy.monte_carlo import systematic_resample
from numpy.linalg import norm
from numpy.random import randn
import scipy.stats
def run_pf1(N, iters=18, sensor_std_err=.1,
            do_plot=True, plot_particles=False,
            xlim=(0, 20), ylim=(0, 20),
            initial_x=None):
    """Run the SIR particle filter on the landmark-localization demo.

    The simulated robot starts at (0, 0) and moves one unit in both x
    and y per iteration; measurements are noisy distances to four fixed
    landmarks.

    Parameters
    ----------
    N : int
        Number of particles.
    iters : int
        Number of simulation steps.
    sensor_std_err : float
        Std dev of the range-measurement noise (also passed as R to update()).
    do_plot : bool
        Accepted but unused in this version of the function.
    plot_particles : bool
        If True, scatter the particle cloud at each step.
    xlim, ylim : tuple
        Plot axis limits.
    initial_x : tuple or None
        Optional (x, y, heading) guess; if given, particles are drawn
        from a Gaussian around it instead of uniformly over the arena.

    Relies on create_gaussian_particles / create_uniform_particles /
    predict / update / neff / estimate defined earlier in the notebook.
    """
    landmarks = np.array([[-1, 2], [5, 10], [12,14], [18,21]])
    NL = len(landmarks)
    plt.figure()

    # create particles and weights
    if initial_x is not None:
        # informed start: Gaussian cloud around the supplied guess
        particles = create_gaussian_particles(
            mean=initial_x, std=(5, 5, np.pi/4), N=N)
    else:
        # uninformed start: uniform over the arena and all headings
        particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
    # zeros is safe here because update() fills the weights with 1.0
    # before they are ever read
    weights = np.zeros(N)

    if plot_particles:
        alpha = .20
        if N > 5000:
            # fade markers as N grows so the plot stays readable
            alpha *= np.sqrt(5000)/np.sqrt(N)
        plt.scatter(particles[:, 0], particles[:, 1],
                    alpha=alpha, color='g')

    xs = []  # state estimate (mean) collected at each iteration
    robot_pos = np.array([0., 0.])
    for x in range(iters):
        robot_pos += (1, 1)

        # distance from robot to each landmark, corrupted by sensor noise
        zs = (norm(landmarks - robot_pos, axis=1) +
              (randn(NL) * sensor_std_err))

        # move diagonally forward to (x+1, x+1)
        predict(particles, u=(0.00, 1.414), std=(.2, .05))

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err,
               landmarks=landmarks)

        # resample if too few effective particles
        if neff(weights) < N/2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)

        mu, var = estimate(particles, weights)
        xs.append(mu)

        if plot_particles:
            plt.scatter(particles[:, 0], particles[:, 1],
                        color='k', marker=',', s=1)
        p1 = plt.scatter(robot_pos[0], robot_pos[1], marker='+',
                         color='k', s=180, lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')

    xs = np.array(xs)
    #plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    print('final position error, variance:\n\t', mu, var)
# fix the RNG seed so the run is reproducible, then run the filter
from numpy.random import seed
seed(2)
run_pf1(N=5000, plot_particles=False)
```
Most of this code is devoted to initialization and plotting. The entirety of the particle filter processing consists of these lines:
```python
# move diagonally forward to (x+1, x+1)
predict(particles, u=(0.00, 1.414), std=(.2, .05))
# incorporate measurements
update(particles, weights, z=zs, R=sensor_std_err,
landmarks=landmarks)
# resample if too few effective particles
if neff(weights) < N/2:
indexes = systematic_resample(weights)
resample_from_index(particles, weights, indexes)
mu, var = estimate(particles, weights)
```
The first line predicts the position of the particles with the assumption that the robot is moving in a straight line (`u[0] == 0`) and moving 1 unit in both the x and y axis (`u[1]==1.414`). The standard deviation for the error in the turn is 0.2, and the standard deviation for the distance is 0.05. When this call returns the particles will all have been moved forward, but the weights are no longer correct as they have not been updated.
The next line incorporates the measurement into the filter. This does not alter the particle positions, it only alters the weights. If you recall the weight of the particle is computed as the probability that it matches the Gaussian of the sensor error model. The further the particle from the measured distance the less likely it is to be a good representation.
The final lines examine the effective particle count ($\hat{N}_\text{eff}$). If it falls below $N/2$ we perform resampling to try to ensure our particles form a good representation of the actual probability distribution.
Now let's look at this with all the particles plotted. Seeing this happen interactively is much more instructive, but this format still gives us useful information. I plotted the original random distribution of points in a very pale green and large circles to help distinguish them from the subsequent iterations where the particles are plotted with black pixels. The number of particles makes it hard to see the details, so I limited the number of iterations to 8 so we can zoom in and look more closely.
```
# same seed as before, but fewer iterations and particles plotted so the
# cloud at each step can be inspected up close
seed(2)
run_pf1(N=5000, iters=8, plot_particles=True,
        xlim=(0,8), ylim=(0,8))
```
From the plot it looks like there are only a few particles at the first two robot positions. This is not true; there are 5,000 particles, but due to resampling most are duplicates of each other. The reason for this is the Gaussian for the sensor is very narrow. This is called *sample impoverishment* and can lead to filter divergence. I'll address this in detail below. For now, looking at the second step at x=2 we can see that the particles have dispersed a bit. This dispersion is due to the motion model noise. All particles are projected forward according to the control input `u`, but noise is added to each particle proportional to the error in the control mechanism in the robot. By the third step the particles have dispersed enough to make a convincing cloud of particles around the robot.
The shape of the particle cloud is an ellipse. This is not a coincidence. The sensors and robot control are both modeled as Gaussian, so the probability distribution of the system is also a Gaussian. The particle filter is a sampling of the probability distribution, so the cloud should be an ellipse.
It is important to recognize that the particle filter algorithm *does not require* the sensors or system to be Gaussian or linear. Because we represent the probability distribution with a cloud of particles we can handle any probability distribution and strongly nonlinear problems. There can be discontinuities and hard limits in the probability model.
### Effect of Sensor Errors on the Filter
The first few iterations of the filter resulted in many duplicate particles. This happens because the model for the sensors is Gaussian, and we gave it a small standard deviation of $\sigma=0.1$. This is counterintuitive at first. The Kalman filter performs better when the noise is smaller, yet the particle filter can perform worse.
We can reason about why this is true. If $\sigma=0.1$, the robot is at (1, 1) and a particle is at (2, 2) the particle is 14 standard deviations away from the robot. This gives it a near zero probability. It contributes nothing to the estimate of the mean, and it is extremely unlikely to survive after the resampling. If $\sigma=1.4$ then the particle is only $1\sigma$ away and thus it will contribute to the estimate of the mean. During resampling it is likely to be copied one or more times.
This is *very important* to understand - a very accurate sensor can lead to poor performance of the filter because few of the particles will be a good sample of the probability distribution. There are a few fixes available to us. First, we can artificially increase the sensor noise standard deviation so the particle filter will accept more points as matching the robots probability distribution. This is non-optimal because some of those points will be a poor match. The real problem is that there aren't enough points being generated such that enough are near the robot. Increasing `N` usually fixes this problem. This decision is not cost free as increasing the number of particles significantly increase the computation time. Still, let's look at the result of using 100,000 particles.
```
# rerun the identical scenario with 20x the particles
seed(2)
run_pf1(N=100000, iters=8, plot_particles=True,
        xlim=(0,8), ylim=(0,8))
```
There are many more particles at x=1, and we have a convincing cloud at x=2. Clearly the filter is performing better, but at the cost of large memory usage and long run times.
Another approach is to be smarter about generating the initial particle cloud. Suppose we guess that the robot is near (0, 0). This is not exact, as the simulation actually places the robot at (1, 1), but it is close. If we create a normally distributed cloud near (0, 0) there is a much greater chance of the particles matching the robot's position.
`run_pf1()` has an optional parameter `initial_x`. Use this to specify the initial position guess for the robot. The code then uses `create_gaussian_particles(mean, std, N)` to create particles distributed normally around the initial guess. We will use this in the next section.
### Filter Degeneracy From Inadequate Samples
The filter as written is far from perfect. Here is how it performs with a different random seed.
```
# a different seed exposes sample impoverishment: no initial particles
# land near the robot and the filter diverges
seed(6)
run_pf1(N=5000, plot_particles=True, ylim=(-20, 20))
```
Here the initial sample of points did not generate any points near the robot. The particle filter does not create new points during the resample operation, so it ends up duplicating points which are not a representative sample of the probability distribution. As mentioned earlier this is called *sample impoverishment*. The problem quickly spirals out of control. The particles are not a good match for the landmark measurements so they become dispersed in a highly nonlinear, curved distribution, and the particle filter diverges from reality. No particles are available near the robot, so it cannot ever converge.
Let's make use of the `create_gaussian_particles()` method to try to generate more points near the robot. We can do this by using the `initial_x` parameter to specify a location to create the particles.
```
# same failing seed, but seed the particle cloud near the robot's true
# start so the filter can converge
seed(6)
run_pf1(N=5000, plot_particles=True, initial_x=(1,1, np.pi/4))
```
This works great. You should always try to create particles near the initial position if you have any way to roughly estimate it. Do not be *too* careful - if you generate all the points very near a single position the particles may not be dispersed enough to capture the nonlinearities in the system. This is a fairly linear system, so we could get away with a smaller variance in the distribution. Clearly this depends on your problem. Increasing the number of particles is always a good way to get a better sample, but the processing cost may be a higher price than you are willing to pay.
## Importance Sampling
I've hand waved a difficulty away which we must now confront. There is some probability distribution that describes the position and movement of our robot. We want to draw a sample of particles from that distribution and compute the integral using MC methods.
Our difficulty is that in many problems we don't know the distribution. For example, the tracked object might move very differently than we predicted with our state model. How can we draw a sample from a probability distribution that is unknown?
There is a theorem from statistics called [*importance sampling*](https://en.wikipedia.org/wiki/Importance_sampling)[1]. Remarkably, it gives us a way to draw samples from a different and known probability distribution and use those to compute the properties of the unknown one. It's a fantastic theorem that brings joy to my heart.
The idea is simple, and we already used it. We draw samples from the known probability distribution, but *weight the samples* according to the distribution we are interested in. We can then compute properties such as the mean and variance by computing the weighted mean and weighted variance of the samples.
For the robot localization problem we drew samples from the probability distribution that we computed from our state model prediction step. In other words, we reasoned 'the robot was there, it is perhaps moving in this direction and speed, hence it might be here'. Yet the robot might have done something completely different. It may have fallen off a cliff or been hit by a mortar round. In each case the probability distribution is not correct. It seems like we are stymied, but we are not because we can use importance sampling. We drew particles from that likely incorrect probability distribution, then weighted them according to how well the particles match the measurements. That weighting is based on the true probability distribution, so according to the theory the resulting mean, variance, etc, will be correct!
How can that be true? I'll give you the math; you can safely skip this if you don't plan to go beyond the robot localization problem. However, other particle filter problems require different approaches to importance sampling, and a bit of math helps. Also, the literature and much of the content on the web uses the mathematical formulation in favor of my rather imprecise "imagine that..." exposition. If you want to understand the literature you will need to know the following equations.
We have some probability distribution $\pi(x)$ which we want to take samples from. However, we don't know what $\pi(x)$ is; instead we only know an alternative probability distribution $q(x)$. In the context of robot localization, $\pi(x)$ is the probability distribution for the robot, but we don't know it, and $q(x)$ is the probability distribution of our measurements, which we do know.
The expected value of a function $f(x)$ with probability distribution $\pi(x)$ is
$$\mathbb{E}\big[f(x)\big] = \int f(x)\pi(x)\, dx$$
We don't know $\pi(x)$ so we cannot compute this integral. We do know an alternative distribution $q(x)$ so we can add it into the integral without changing the value with
$$\mathbb{E}\big[f(x)\big] = \int f(x)\pi(x)\frac{q(x)}{q(x)}\, dx$$
Now we rearrange and group terms
$$\mathbb{E}\big[f(x)\big] = \int f(x)q(x)\, \, \cdot \, \frac{\pi(x)}{q(x)}\, dx$$
$q(x)$ is known to us, so we can compute $\int f(x)q(x)$ using MC integration. That leaves us with $\pi(x)/q(x)$. That is a ratio, and we define it as a *weight*. This gives us
$$\mathbb{E}\big[f(x)\big] = \sum\limits_{i=1}^N f(x^i)w(x^i)$$
Maybe that seems a little abstract. If we want to compute the mean of the particles we would compute
$$\mu = \sum\limits_{i=1}^N x^iw^i$$
which is the equation I gave you earlier in the chapter.
It is required that the weights be proportional to the ratio $\pi(x)/q(x)$. We normally do not know the exact value, so in practice we normalize the weights by dividing them by $\sum w(x^i)$.
When you formulate a particle filter algorithm you will have to implement this step depending on the particulars of your situation. For robot localization the best distribution to use for $q(x)$ is the particle distribution from the `predict()` step of the filter. Let's look at the code again:
```python
def update(particles, weights, z, R, landmarks):
    """Importance-weight each particle by the likelihood of the measured
    landmark distances `z` under a Gaussian sensor model with std `R`."""
    weights.fill(1.)  # start from a uniform weighting over the particles
    for i in range(len(landmarks)):
        # distance from every particle's (x, y) to this landmark
        expected = np.linalg.norm(particles[:, 0:2] - landmarks[i], axis=1)
        weights *= scipy.stats.norm(expected, R).pdf(z[i])
    weights += 1.e-300       # keep weights strictly positive (round-off guard)
    weights /= sum(weights)  # normalize
```
The reason for `weights.fill(1.)` might have confused you. In all the Bayesian filters up to this chapter we started with the probability distribution created by the `predict` step, and this appears to discard that information by setting all of the weights to 1. Well, we are discarding the weights, but we do not discard the particles. That is a direct result of applying importance sampling - we draw from the known distribution, but weight by the unknown distribution. In this case our known distribution is the uniform distribution - all are weighted equally.
Of course if you can compute the posterior probability distribution from the prior you should do so. If you cannot, then importance sampling gives you a way to solve this problem. In practice, computing the posterior is incredibly difficult. The Kalman filter became a spectacular success because it took advantage of the properties of Gaussians to find an analytic solution. Once we relax the conditions required by the Kalman filter (Markov property, Gaussian measurements and process) importance sampling and monte carlo methods make the problem tractable.
## Resampling Methods
The resampling algorithm affects the performance of the filter. For example, suppose we resampled particles by picking particles at random. This would lead us to choosing many particles with a very low weight, and the resulting set of particles would be a terrible representation of the problem's probability distribution.
Research on the topic continues, but a handful of algorithms work well in practice across a wide variety of situations. We desire an algorithm that has several properties. It should preferentially select particles that have a higher probability. It should select a representative population of the higher probability particles to avoid sample impoverishment. It should include enough lower probability particles to give the filter a chance of detecting strongly nonlinear behavior.
FilterPy implements several of the popular algorithms. FilterPy doesn't know how your particle filter is implemented, so it cannot generate the new samples. Instead, the algorithms create a `numpy.array` containing the indexes of the particles that are chosen. Your code needs to perform the resampling step. For example, I used this for the robot:
```
def resample_from_index(particles, weights, indexes):
    """Apply a resampling result: keep only the entries selected by
    `indexes`, in place, then normalize the weights."""
    for arr in (particles, weights):
        arr[:] = arr[indexes]
    weights /= np.sum(weights)
```
### Multinomial Resampling
Multinomial resampling is the algorithm that I used while developing the robot localization example. The idea is simple. Compute the cumulative sum of the normalized weights. This gives you an array of increasing values from 0 to 1. Here is a plot which illustrates how this spaces out the weights. The colors are meaningless, they just make the divisions easier to see.
```
# visualize how the cumulative sum partitions [0, 1] in proportion to the weights
from code.pf_internal import plot_cumsum
print('cumulative sum is', np.cumsum([.1, .2, .1, .6]))  # fixed typo: was 'sume'
plot_cumsum([.1, .2, .1, .6])
```
To select a weight we generate a random number uniformly selected between 0 and 1 and use binary search to find its position inside the cumulative sum array. Large weights occupy more space than low weights, so they will be more likely to be selected.
This is very easy to code using NumPy's [ufunc](http://docs.scipy.org/doc/numpy/reference/ufuncs.html) support. Ufuncs apply functions to every element of an array, returning an array of the results. `searchsorted` is NumPy's binary search algorithm. If you provide it with an array of search values it will return an array of answers; one answer for each search value.
```
def multinomal_resample(weights):
    """Multinomial resampling: return len(weights) particle indexes,
    each drawn with probability proportional to its weight.
    (The name's spelling matches FilterPy's public API.)"""
    cdf = np.cumsum(weights)
    cdf[-1] = 1.  # force the final bin edge to exactly one (round-off guard)
    draws = random(len(weights))
    return np.searchsorted(cdf, draws)
```
Here is an example:
```
# show which indexes multinomial resampling picks for a sample weight set
from code.pf_internal import plot_multinomial_resample
plot_multinomial_resample([.1, .2, .3, .4, .2, .3, .1])
```
This is an $O(n \log(n))$ algorithm. That is not terrible, but there are $O(n)$ resampling algorithms with better properties with respect to the uniformity of the samples. I'm showing it because you can understand the other algorithms as variations on this one. There is a faster implementation of this multinomial resampling that uses the inverse of the CDF of the distribution. You can search on the internet if you are interested.
Import the function from FilterPy using
```python
from filterpy.monte_carlo import multinomal_resample
```
### Residual Resampling
Residual resampling both improves the run time of multinomial resampling, and ensures that the sampling is uniform across the population of particles. It's fairly ingenious: the normalized weights are multiplied by *N*, and then the integer value of each weight is used to define how many samples of that particle will be taken. For example, if the weight of a particle is 0.0012 and $N$=3000, the scaled weight is 3.6, so 3 samples will be taken of that particle. This ensures that all higher weight particles are chosen at least once. The running time is $O(N)$, making it faster than multinomial resampling.
However, this does not generate all *N* selections. To select the rest, we take the *residual*: the weights minus the integer part, which leaves the fractional part of the number. We then use a simpler sampling scheme such as multinomial, to select the rest of the particles based on the residual. In the example above the scaled weight was 3.6, so the residual will be 0.6 (3.6 - int(3.6)). This residual is very large so the particle will be likely to be sampled again. This is reasonable because the larger the residual the larger the error in the round off, and thus the particle was relatively under sampled in the integer step.
```
def residual_resample(weights):
    """Residual resampling.

    Each particle i is first copied floor(N * w[i]) times, guaranteeing
    every high-weight particle is selected at least in proportion to its
    weight. The remaining slots are filled by multinomial sampling on
    the residuals (the fractional parts left after the integer copies).

    Parameters
    ----------
    weights : array-like of float
        Normalized particle weights (should sum to 1).

    Returns
    -------
    ndarray of int
        N indexes into the particle array, where N = len(weights).
    """
    N = len(weights)
    indexes = np.zeros(N, 'i')

    # deterministic part: take int(N*w) copies of each particle
    w = N * np.asarray(weights)  # scaled weights; `w` was undefined in the original
    num_copies = w.astype(int)
    k = 0
    for i in range(N):
        for _ in range(num_copies[i]):  # make num_copies[i] copies
            indexes[k] = i
            k += 1

    # stochastic part: multinomial resample on the residuals fills the
    # remaining N-k slots. Guarded so an all-integer weight vector does
    # not divide by a zero residual sum.
    if k < N:
        residual = w - num_copies       # fractional parts
        residual /= sum(residual)       # normalize
        cumulative_sum = np.cumsum(residual)
        cumulative_sum[-1] = 1.         # ensure the sum is exactly one
        indexes[k:N] = np.searchsorted(cumulative_sum, random(N - k))
    return indexes
```
You may be tempted to replace the inner for loop with a slice `indexes[k:k + num_copies[i]] = i`, but very short slices are comparatively slow, and the for loop usually runs faster.
Let's look at an example:
```
# show which indexes residual resampling picks for the same weight set
from code.pf_internal import plot_residual_resample
plot_residual_resample([.1, .2, .3, .4, .2, .3, .1])
```
You may import this from FilterPy using
```python
from filterpy.monte_carlo import residual_resample
```
### Stratified Resampling
This scheme aims to make selections relatively uniformly across the particles. It works by dividing the cumulative sum into $N$ equal sections, and then selects one particle randomly from each section. This guarantees that each sample is between 0 and $\frac{2}{N}$ apart.
The plot below illustrates this. The colored bars show the cumulative sum of the array, and the black lines show the $N$ equal subdivisions. Particles, shown as black circles, are randomly placed in each subdivision.
```
# import from code.pf_internal for consistency with the other plotting
# snippets in this chapter (was `from pf_internal import ...`)
from code.pf_internal import plot_stratified_resample
plot_stratified_resample([.1, .2, .3, .4, .2, .3, .1])
```
The code to perform the stratification is quite straightforward.
```
def stratified_resample(weights):
    """Stratified resampling: split [0, 1) into N equal strata, draw one
    uniform sample inside each, and map every sample onto the cumulative
    weight distribution. Consecutive samples are at most 2/N apart."""
    N = len(weights)
    # one uniformly-placed draw inside each of the N strata
    draws = (random(N) + np.arange(N)) / N
    cdf = np.cumsum(weights)
    chosen = np.zeros(N, 'i')
    j = 0
    for i in range(N):
        # advance through the CDF until this draw falls inside bin j
        while draws[i] >= cdf[j]:
            j += 1
        chosen[i] = j
    return chosen
```
Import it from FilterPy with
```python
from filterpy.monte_carlo import stratified_resample
```
### Systematic Resampling
The last algorithm we will look at is systematic resampling. As with stratified resampling the space is divided into $N$ divisions. We then choose a random offset to use for all of the divisions, ensuring that each sample is exactly $\frac{1}{N}$ apart. It looks like this.
```
# import from code.pf_internal for consistency with the other plotting
# snippets in this chapter (was `from pf_internal import ...`)
from code.pf_internal import plot_systematic_resample
plot_systematic_resample([.1, .2, .3, .4, .2, .3, .1])
```
Having seen the earlier examples the code couldn't be simpler.
```
def systematic_resample(weights):
    """Systematic resampling: place N sample points exactly 1/N apart
    with a single shared random offset, then map each onto the
    cumulative weight distribution."""
    N = len(weights)
    offset = random()
    points = (np.arange(N) + offset) / N
    cdf = np.cumsum(weights)
    resampled = np.zeros(N, 'i')
    j = 0
    for i in range(N):
        # walk forward through the CDF until point i falls inside bin j
        while points[i] >= cdf[j]:
            j += 1
        resampled[i] = j
    return resampled
```
Import from FilterPy with
```python
from filterpy.monte_carlo import systematic_resample
```
### Choosing a Resampling Algorithm
Let's look at the four algorithms at once so they are easier to compare.
```
# run all four resamplers on the same weight vector with a fixed seed so
# their selections can be compared side by side
a = [.1, .2, .3, .4, .2, .3, .1]
np.random.seed(4)
plot_multinomial_resample(a)
plot_residual_resample(a)
plot_systematic_resample(a)
plot_stratified_resample(a)
```
The performance of the multinomial resampling is quite bad. There is a very large weight that was not sampled at all. The largest weight only got one resample, yet the smallest weight was sampled twice. Most tutorials on the net that I have read use multinomial resampling, and I am not sure why. Multinomial resampling is rarely used in the literature or for real problems. I recommend not using it unless you have a very good reason to do so.
The residual resampling algorithm does excellently at what it tries to do: ensure all the largest weights are resampled multiple times. It doesn't evenly distribute the samples across the particles - many reasonably large weights are not resampled at all.
Both systematic and stratified perform very well. Systematic sampling does an excellent job of ensuring we sample from all parts of the particle space while ensuring larger weights are proportionally resampled more often. Stratified resampling is not quite as uniform as systematic resampling, but it is a bit better at ensuring the higher weights get resampled more.
Plenty has been written on the theoretical performance of these algorithms, and feel free to read it. In practice I apply particle filters to problems that resist analytic efforts, and so I am a bit dubious about the validity of a specific analysis to these problems. In practice both the stratified and systematic algorithms perform well and similarly across a variety of problems. I say try one, and if it works stick with it. If performance of the filter is critical try both, and perhaps see if there is literature published on your specific problem that will give you better guidance.
## Summary
This chapter only touches the surface of what is a vast topic. My goal was not to teach you the field, but to expose you to practical Bayesian Monte Carlo techniques for filtering.
Particle filters are a type of *ensemble* filtering. Kalman filters represent state with a Gaussian. Measurements are applied to the Gaussian using Bayes Theorem, and the prediction is done using state-space methods. These techniques are applied to the Gaussian - the probability distribution.
In contrast, ensemble techniques represent a probability distribution using a discrete collection of points and associated probabilities. Measurements are applied to these points, not the Gaussian distribution. Likewise, the system model is applied to the points, not a Gaussian. We then compute the statistical properties of the resulting ensemble of points.
These choices have many trade-offs. The Kalman filter is very efficient, and is an optimal estimator if the assumptions of linearity and Gaussian noise are true. If the problem is nonlinear then we must linearize the problem. If the problem is multimodal (more than one object being tracked) then the Kalman filter cannot represent it. The Kalman filter requires that you know the state model. If you do not know how your system behaves the performance is poor.
In contrast, particle filters work with any arbitrary, non-analytic probability distribution. The ensemble of particles, if large enough, form an accurate approximation of the distribution. It performs wonderfully even in the presence of severe nonlinearities. Importance sampling allows us to compute probabilities even if we do not know the underlying probability distribution. Monte Carlo techniques replace the analytic integrals required by the other filters.
This power comes with a cost. The most obvious costs are the high computational and memory burdens the filter places on the computer. Less obvious is the fact that they are fickle. You have to be careful to avoid particle degeneracy and divergence. It can be very difficult to prove the correctness of your filter. If you are working with multimodal distributions you have further work to cluster the particles to determine the paths of the multiple objects. This can be very difficult when the objects are close to each other.
There are many different classes of particle filter; I only described the naive SIS algorithm, and followed that with a SIR algorithm that performs well. There are many classes of filters, and many examples of filters in each class. It would take a small book to describe them all.
When you read the literature on particle filters you will find that it is strewn with integrals. We perform computations on probability distributions using integrals, so using integrals gives the authors a powerful and compact notation. You must recognize that when you reduce these equations to code you will be representing the distributions with particles, and integrations are replaced with sums over the particles. If you keep in mind the core ideas in this chapter the material shouldn't be daunting.
## References
[1] *Importance Sampling*, Wikipedia.
https://en.wikipedia.org/wiki/Importance_sampling
| github_jupyter |
```
from microfaune.detection import RNNDetector
import csv
import os
import glob
import pandas as pd
from microfaune import audio
import scipy.signal as scipy_signal
from IPython.display import clear_output
from shutil import copyfile
# --- Score every Xeno-Canto (XC) clip with the Microfaune bird detector ---
weightsPath = ""  # path to the pretrained RNNDetector weights
XCDataPath = ""   # directory holding the Xeno-Canto .wav clips

column_names = ["Folder", "Clip", "Bird_Label", "Global Score"]
df = pd.DataFrame(columns=column_names)

bird_detector = RNNDetector(weightsPath)

# Downsample everything above 44.1 kHz into the human hearing range.
Normalized_Sample_Rate = 44100

dataList = []  # accumulates [folder, clip, label, global score] rows

# count the clips up front so progress can be reported as "i/total"
# (renamed from `list`, which shadowed the builtin)
xc_files = os.listdir(XCDataPath)
num_filesXC = len(xc_files)

countXC = 0      # successfully scored clips
errCount = 0     # clips that failed feature extraction / prediction
repCountXC = 1   # duplicate downloads seen (starts at 1 in the original)
repListXC = []   # names of the duplicate clips

# with open("DAXC.csv",mode='a') as dataset:
#     writer = csv.writer(dataset,delimiter=",")
#     writer.writerow(["Folder","Clip","Bird_Label","Global Score"])

for file in glob.glob(XCDataPath + "*.wav"):
    path_list = file.split("/")
    folder_name = path_list[len(path_list) - 2]
    clip_name = path_list[len(path_list) - 1]

    # "(1)" in the name marks a duplicate download; record it and skip
    if "(1)" in clip_name:
        repCountXC += 1
        repListXC.append(clip_name)
        continue

    SAMPLE_RATE, SIGNAL = audio.load_wav(XCDataPath + clip_name)

    # downsample the audio if the sample rate > 44.1 kHz
    # May consider reworking this so that it upsamples as well.
    if SAMPLE_RATE > Normalized_Sample_Rate:
        rate_ratio = Normalized_Sample_Rate / SAMPLE_RATE
        SIGNAL = scipy_signal.resample(
            SIGNAL, int(len(SIGNAL) * rate_ratio))
        SAMPLE_RATE = Normalized_Sample_Rate
        # resample produces unreadable float32 array so convert back
        #SIGNAL = np.asarray(SIGNAL, dtype=np.int16)

    # convert stereo to mono if needed by averaging the two channels.
    # Might want to compare to just taking the first channel.
    if len(SIGNAL.shape) == 2:
        SIGNAL = SIGNAL.sum(axis=1) / 2

    try:
        microfaune_features = bird_detector.compute_features([SIGNAL])
        global_score, local_score = bird_detector.predict(microfaune_features)
        clear_output(wait=True)
        dataList.append([folder_name, clip_name, 'y', global_score[0][0]])
        countXC += 1
        print(str(countXC) + "/" + str(num_filesXC))
    # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        print(file + " Failed")
        errCount += 1
        continue

    # with open("DAXC.csv",mode='a') as dataset:
    #     writer = csv.writer(dataset,delimiter=",")
    #     writer.writerow([folder_name,clip_name,'y',global_score[0][0]])

print("Errors: " + str(errCount))
nonBirdPath = ""
list = os.listdir(nonBirdPath) # dir is your directory path
num_files = len(list)
countNB = 0
errCount = 0
repCountNB = 0
repListNB = []
for file in glob.glob(nonBirdPath + "*.wav"):
path_list = file.split("/")
folder_name = path_list[len(path_list) - 2 ]
clip_name = path_list[len(path_list) - 1 ]
if "(1)" in clip_name:
repCountNB += 1
repListNB.append(clip_name)
continue
SAMPLE_RATE, SIGNAL = audio.load_wav(nonBirdPath + clip_name)
# downsample the audio if the sample rate > 44.1 kHz
# Force everything into the human hearing range.
# May consider reworking this function so that it upsamples as well
if SAMPLE_RATE > Normalized_Sample_Rate:
rate_ratio = Normalized_Sample_Rate / SAMPLE_RATE
SIGNAL = scipy_signal.resample(
SIGNAL, int(len(SIGNAL)*rate_ratio))
SAMPLE_RATE = Normalized_Sample_Rate
# resample produces unreadable float32 array so convert back
#SIGNAL = np.asarray(SIGNAL, dtype=np.int16)
#print(SIGNAL.shape)
# convert stereo to mono if needed
# Might want to compare to just taking the first set of data.
if len(SIGNAL.shape) == 2:
SIGNAL = SIGNAL.sum(axis=1) / 2
try:
microfaune_features = bird_detector.compute_features([SIGNAL])
global_score, local_score = bird_detector.predict(microfaune_features)
clear_output(wait=True)
dataList.append([folder_name,clip_name,'n',global_score[0][0]])
countNB += 1
print(str(countNB) + "/" + str(num_files))
# There are more non bird files than bird present files so we balance them
if (countNB >= countXC):
break
except:
print(file + " Failed")
errCount += 1
continue
print("Errors: " + str(errCount))
df = pd.DataFrame(dataList, columns = ["Folder","Clip","Bird_Label","Global Score"])
df
csvName = ""
df.to_csv(csvName)
```
| github_jupyter |
## Desafio Final 1
Bootcamp Analista de Machine Learning @ IGTI
**Objetivos**:
* Pré-processamento dos dados.
* Detecção de anomalias
* Processamento dos dados.
* Correlações.
* Redução da dimensionalidade.
* Algoritmos supervisionados e não supervisionados
**Análise com:**
* Redução de dimensionalidade
* Clusterização com K-means
* Classificação supervisionada
```
import pandas as pd
import numpy as np
import seaborn as sns
# Mount Google Drive so the dataset can be read from it (Colab only).
from google.colab import drive
drive.mount('/content/drive')
# Cars dataset for the final challenge.
cars = pd.read_csv('/content/drive/My Drive/Data Science/Bootcamp Analista de ML/Desafio Final/cars.csv')
```
## Conhecendo o dataset
**Significado das classes:**
* mpg = miles per gallon
* cylinders = quantidade de cilindros, que é a origem da força mecânica que possibilita o deslocamento do veículo
* cubicinches = volume total de ar e combustível queimado pelos cilindros através do motor
* hp = horse power
* weightlbs = peso do carro em libras
* time-to-60 = capacidade em segundos do carro de ir de 0 a 60 milhas por horas
* year = ano de fabricação
* brand = marca, origem, etc.
1 kg = 2,20462 lbs
```
# First rows and summary statistics of the dataset.
cars.head()
cars.describe()
# rows x columns
cars.shape
# Are there any missing values?
cars.isnull().sum()
cars.info()
```
## Teste: Desafio Final
Pergunta 1 - Após a utilização da biblioteca pandas para a leitura dos dados sobre os valores lidos, é CORRETO afirmar que:
```
# No nulls are reported before the type conversions performed below (Q1).
cars.isnull().sum()
```
**Não foram encontrados valores nulos após a leitura dos dados.**
Pergunta 2 - Realize a transformação das colunas “cubicinches” e “weightlbs” do tipo “string” para o tipo numérico utilizando o pd.to_numeric(), utilizando o parâmetro errors='coerce'. Após essa transformação, é CORRETO afirmar:
```
# Coerce the object-typed measurement columns to numeric; entries that
# cannot be parsed become NaN (errors='coerce').
for _col in ('cubicinches', 'weightlbs'):
    cars[_col] = pd.to_numeric(cars[_col], errors='coerce')
# Inspect the resulting dtypes and count the NaNs introduced by coercion.
cars.info()
cars.isnull().sum()
```
**Essa transformação adiciona valores nulos ao nosso dataset.**
Pergunta 3 - Indique quais eram os índices dos valores presentes no dataset que “forçaram” o pandas a compreender a variável “cubicinches” como string.
```
# Rows whose "cubicinches" value could not be parsed are exactly those that
# became NaN after the coercion; .index yields their labels directly.
# (The original wrapped the filtered DataFrame in a list, which displays the
# rows but never answers the question about the *indices*.)
indices_cub = cars[cars['cubicinches'].isnull()].index
indices_cub
```
Pergunta 4 - Após a transformação das variáveis “string” para os valores numéricos, quantos valores nulos (células no dataframe) passaram a existir no dataset?
```
# Count the NaNs introduced by the to_numeric coercion (answer to Q4).
cars.isnull().sum()
```
Pergunta 5 - Substitua os valores nulos introduzidos no dataset, após a transformação, pelo valor médio das colunas. Qual é o novo valor médio da coluna “weightlbs”?
```
# Impute the coercion-introduced NaNs with each column's mean value.
cars['cubicinches'] = cars['cubicinches'].fillna(cars['cubicinches'].mean())
cars['weightlbs'] = cars['weightlbs'].fillna(cars['weightlbs'].mean())
# Confirm no NaNs remain, then report the new mean weight (Q5).
cars.isnull().sum()
cars['weightlbs'].mean()
```
Pergunta 6 - Após substituir os valores nulos pela média das colunas, selecione as colunas ['mpg', 'cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60', 'year']. Qual é o valor da mediana para a característica 'mpg'?
```
# Median miles-per-gallon (answer to Q6).
cars['mpg'].median()
```
Pergunta 7 - Qual é a afirmação CORRETA sobre o valor de 14,00 para a variável “time-to-60”?
```
# Summary statistics, used to locate 14.00 among the "time-to-60" quartiles.
cars.describe()
```
75% dos dados são maiores que o valor de 14,00.
8 - Sobre o coeficiente de correlação de Pearson entre as variáveis “cylinders” e “mpg”, é correto afirmar
```
from scipy import stats
# Pearson correlation between cylinders and mpg: returns (coefficient, p-value).
stats.pearsonr(cars['cylinders'], cars['mpg'])
from sklearn.metrics import r2_score
# NOTE(review): r2_score(y_true, y_pred) is not symmetric and is not the
# squared Pearson correlation; treating "cylinders" as ground truth for
# "mpg" predictions is questionable here — confirm the intent.
r2_score(cars['cylinders'], cars['mpg'])
```
Mesmo não sendo igual a 1, é possível dizer que à medida em que a variável “cylinders” aumenta, a variável “mpg” também aumenta na mesma direção.
9 - Sobre o boxplot da variável “hp”, é correto afirmar, EXCETO:
```
# Boxplot of horsepower (median, quartiles, whiskers, outliers) for Q9.
sns.boxplot(cars['hp'])
```
Cada um dos quartis possui a mesma quantidade de valores para a variável “hp”.
10 - Após normalizado, utilizando a função StandardScaler(), qual é o maior valor para a variável “hp”?
```
cars.head()
# Standardize only the numeric columns; "brand" is categorical, so drop it.
cars_normalizar = cars.drop('brand', axis=1)
cars_normalizar.head()
from sklearn.preprocessing import StandardScaler
normalizar = StandardScaler() #instantiate the standard scaler
scaler = normalizar.fit(cars_normalizar.values) #fit the scaler on the data
cars_normalizado = scaler.transform(cars_normalizar.values) #apply the standardization
cars_normalizado = pd.DataFrame(cars_normalizado, columns=cars_normalizar.columns) #numpy array back to a pandas DataFrame
# Largest standardized "hp" value (answer to Q10).
cars_normalizado['hp'].max()
```
11 - Aplicando o PCA, conforme a definição acima, qual é o valor da variância explicada com pela primeira componente principal
```
from sklearn.decomposition import PCA
# Keep all 7 components so the full explained-variance spectrum is visible.
pca = PCA(n_components=7)
principais = pca.fit_transform(cars_normalizado)
# Fraction of variance explained by each component (Q11 asks for the first).
pca.explained_variance_ratio_
```
12 - Utilize os três primeiros componentes principais para construir o K-means com um número de 3 clusters. Sobre os clusters, é INCORRETO afirmar que
```
# Fix: the original called principais.explained_variance_ratio_, but
# `principais` is the plain ndarray returned by fit_transform and has no such
# attribute (AttributeError); the ratios live on the fitted PCA object.
pca.explained_variance_ratio_
principais_componentes = pd.DataFrame(principais)
principais_componentes.head()
# Keep only the first three principal components, as the exercise requires.
principais_componentes_k = principais_componentes.iloc[:, :3]
principais_componentes_k.columns = ['componente 1', 'componente 2', 'componente 3']
from sklearn.cluster import KMeans
# Parameters fixed by the challenge statement (3 clusters, seed 42).
kmeans = KMeans(n_clusters=3, random_state=42).fit(principais_componentes_k)
# Label each car with the cluster it was assigned to.
principais_componentes_k['cluster'] = kmeans.labels_
principais_componentes_k
# Size of each generated cluster (Q12).
principais_componentes_k['cluster'].value_counts()
```
13 - Após todo o processamento realizado nos itens anteriores, crie uma coluna que contenha a variável de eficiência do veículo. Veículos que percorrem mais de 25 milhas com um galão (“mpg”>25) devem ser considerados eficientes. Utilize as colunas ['cylinders' ,'cubicinches' ,'hp' ,'weightlbs','time-to-60'] como entradas e como saída a coluna de eficiência criada.
Utilizando a árvore de decisão como mostrado, qual é a acurácia do modelo?
```
cars.head()
# Features: mechanical attributes; target: efficiency flag derived from mpg.
entradas = np.array(cars[['cylinders' ,'cubicinches' ,'hp' ,'weightlbs' ,'time-to-60']])
saidas = np.array(cars['mpg'] > 25).astype(int) # 1 = efficient (mpg > 25), 0 = not
entradas
saidas
from sklearn.model_selection import train_test_split
# 70/30 split with the seed fixed by the exercise statement.
x_train, x_test, y_train, y_test = train_test_split(entradas, saidas, test_size=0.30, random_state=42)
from sklearn.tree import DecisionTreeClassifier
classificador = DecisionTreeClassifier(random_state=42)
classificador.fit(x_train, y_train)
y_pred = classificador.predict(x_test)
from sklearn.metrics import accuracy_score
# Model accuracy on the held-out 30% (answer to Q13).
acuracia = accuracy_score(y_test, y_pred)
acuracia
```
14 - Sobre a matriz de confusão obtida após a aplicação da árvore de decisão, como mostrado anteriormente, é INCORRETO afirmar:
```
from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes (Q14).
confusion_matrix(y_test, y_pred)
```
Existem duas vezes mais veículos considerados não eficientes que instâncias de veículos eficientes
15 - Utilizando a mesma divisão de dados entre treinamento e teste empregada para a análise anterior, aplique o modelo de regressão logística como mostrado na descrição do trabalho.
Comparando os resultados obtidos com o modelo de árvore de decisão, é INCORRETO afirmar que:
```
from sklearn.linear_model import LogisticRegression
# Same train/test split as the decision tree, for a fair comparison (Q15).
logreg = LogisticRegression(random_state=42).fit(x_train, y_train)
logreg_y_pred = logreg.predict(x_test)
accuracy_score(y_test, logreg_y_pred)
```
# Fim
# Visite o meu [github](https://github.com/k3ybladewielder) <3
| github_jupyter |
```
import pandas as pd
import numpy as np
import os, glob
import pandas as pd
import numpy as np
%matplotlib inline
#%matplotlib notebook
import seaborn as sns
sns.reset_orig()
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import pdb
import requests
import sys
from importlib import reload
from pchipOceanSlices import PchipOceanSlices
import visualizeProfs as vp
#reload(visualizeProfs)
# Bounding box centred on (lat=0, long=59.5); presumably construct_box takes
# width/height in degrees — TODO confirm against visualizeProfs.
coord = {}
coord['lat'] = 0
coord['long'] = 59.5
shape = vp.construct_box(coord, 20, 20)
# Hand-picked Argo profile identifiers ("<platform>_<cycle>") to fetch.
ids = ['5901721_24',
'3900105_196',
'4900595_140',
'4900593_152',
'4900883_92',
'5901898_42',
'6900453_3',
'6900453_5',
'6900453_6',
'3900495_188',
'3900495_189',
'3900495_190',
'4901139_74',
'2901211_144',
'2900784_254',
'2901709_19',
'1901218_88',
'4901787_0',
'6902566_44',
'4901787_6',
'6901002_100',
'2902100_104',
'6901002_102',
'6901541_103',
'2901703_157',
'2901765_1',
'4901750_125',
'4902382_4',
'4901285_208',
'4901285_209',
'4902107_54',
'6901448_149',
'6901740_126',
'5901884_302',
'4901466_156',
'4901462_174',
'4901798_110',
'4901798_112',
'4902391_58',
'6902661_118',
'4901824_91',
'4902457_2',
'5904485_280',
'5904485_284',]
# Query window and pressure range (dbar) for the selection-page URLs below.
startDate='2007-6-15'
endDate='2007-7-31'
presRange='[15,35]'
#profiles = get_selection_profiles(startDate, endDate, shape, presRange)
# Fetch profiles by id; spaces are stripped so the id list fits in a URL.
profiles = vp.get_profiles_by_id(str(ids).replace(' ',''), None, True)
if len(profiles) > 0:
    selectionDf = vp.parse_into_df(profiles)
# -999 is the dataset's missing-value sentinel; replace it with NaN.
selectionDf.replace(-999, np.nan, inplace=True)
selectionDf.head()
pos = PchipOceanSlices()
# Interpolate temperature onto a fixed pressure level (xintp dbar) within
# the pLevelRange window.
iCol = 'temp'
xLab = 'pres'
yLab = iCol
xintp = 20
pLevelRange = [15,25]
pos = PchipOceanSlices(pLevelRange)
iDf = pos.make_interpolated_df(selectionDf, xintp, xLab, yLab)
iDf.date = pd.to_datetime(iDf.date)
print(iDf.shape)
iDf.head()
# One scatter plot per profile, with the interpolated point overlaid.
for profile_id, df in selectionDf.groupby('profile_id'):
    #fig.subplots_adjust(hspace=.35, wspace=.35)
    pdf = iDf[iDf['profile_id'] == profile_id]
    if pdf.empty:
        continue
    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6,6))
    ax = vp.plot_scatter(df, profile_id, 'temp', 'pres', axes)
    ax.scatter(pdf.temp.iloc[0], pdf.pres.iloc[0])
# Profiles flagged as suspect during visual inspection.
badProfiles = ['3900495_189']
# Build a selection-page URL for a +/- 15-day, 5x5 degree neighbourhood
# around each interpolated profile.
for row in iDf.itertuples():
    coord = {}
    coord['lat'] = row.lat
    coord['long'] = row.lon
    startDate = datetime.strftime(row.date - timedelta(days=15), '%Y-%m-%d')
    endDate = datetime.strftime(row.date + timedelta(days=15), '%Y-%m-%d')
    shape = vp.construct_box(coord, 5, 5)
    print(row.profile_id)
    vp.build_selection_page_url(startDate, endDate, shape, presRange)
```
| github_jupyter |
I quote myself from the last post:
> The number of tests and the probability to obtain at least one significant result increases with the number of variables (plus interactions) included in the Anova. According to Maxwell (2004) this may be a reason for prevalence of underpowered Anova studies. Researchers target some significant result by default, instead of planning sample size that would provide enough power so that all effects can be reliably discovered.
Maxwell (2004, p. 149) writes:
> a researcher who designs a 2 $\times$ 2 study with 10 participants per cell has a 71% chance of obtaining at least
one statistically significant result if the three effects he or she tests all reflect medium effect sizes. Of course, in
reality, some effects will often be smaller and others will be larger, but the general point here is that the probability of
being able to find something statistically significant and thus potentially publishable may be adequate while at the same
time the probability associated with any specific test may be much lower. Thus, from the perspective of a researcher who
aspires to obtain at least one statistically significant result, 10 participants per cell may be sufficient, despite the fact that a methodological evaluation would declare the study to be underpowered because the power for any single hypothesis is only .35.
What motivates the researcher to keep the N small? Clearly, testing more subjects is costly. But I think that in Anova designs there is additional motivation to keep N small. If we use large N we obtain all main effects and all interactions significant. This is usually not desirable because some of the effects/interactions are not predicted by researcher's theory and non-significant main effect/interaction is taken as an evidence for a lack of this component. Then the researcher needs to find some N that balances between something significant and everything significant. In particular the prediction of significant main effects and non significant interaction is attractive because it is much easier to achieve than other patterns.
Let's look at the probability of obtaining significant main effects and interaction in Anova. I'm lazy so instead of deriving closed-form results I use simulation. Let's assume 2 $\times$ 2 Anova design where the continuous outcome is given by $y= x_1 + x_2 + x_1 x_2 +\epsilon$ with $\epsilon \sim \mathcal{N}(0,2)$ and $x_1 \in \{0,1\}$ and $x_2 \in \{0,1\}$. We give equal weight to all three terms to give them equal start. It is plausible to include all three terms, because with psychological variables everything is correlated (CRUD factor). Let's first show that the interaction requires larger sample size than the main effects.
```
%pylab inline
from scipy import stats
Ns=np.arange(20,200,4);
K=10000;
ps=np.zeros((Ns.size,3))
res=np.zeros(4)
cs=np.zeros((Ns.size,8))
i=0
for N in Ns:
for k in range(K):
x1=np.zeros(N);x1[N/2:]=1
x2=np.mod(range(N),2)
y= 42+x1+x2+x1*x2+np.random.randn(N)*2
tot=np.square(y-y.mean()).sum()
x=np.ones((N,4))
x[:,1]=x1*x2
x[:,2]=x1*(1-x2)
x[:,3]=(1-x1)*x2
res[0]=np.linalg.lstsq(x,y)[1]
x=np.ones((N,2))
x[:,1]=x1
res[1]=tot-np.linalg.lstsq(x,y)[1]
x[:,1]=x2
res[2]=tot-np.linalg.lstsq(x,y)[1]
res[3]=tot-res[0]-res[1]-res[2]
mss=res/np.float32(np.array([N-4,1,1,1]))
F=mss[1:]/mss[0]
p=1-stats.f.cdf(F,1,N-4)
p=p<0.05
ps[i,:]+=np.int32(p)
cs[i,p[0]*4+p[1]*2+p[2]]+=1
i+=1
ps/=float(K)
cs/=float(K)
for k in range(ps.shape[1]): plt.plot(Ns/4, ps[:,k])
plt.legend(['A','B','X'],loc=2)
plt.xlabel('N per cell')
plt.ylabel('expected power');
```
Now we look at the probability that the various configurations of significant and non-significant results will be obtained.
```
plt.figure(figsize=(7,6))
# Patterns A (index 4) and AX (index 5) are omitted: by symmetry of the
# design they match B and BX.
for k in [0,1,2,3,6,7]: plt.plot(Ns/4, cs[:,k])
plt.legend(['nothing','X','B','BX','AB','ABX'],loc=2)
plt.xlabel('N per cell')
plt.ylabel('pattern frequency');
```
To keep the figure from too much clutter I omitted A and AX which is due to symmetry identical to B and BX. By A I mean "main effect A is significant and main effect B plus the interaction are not significant". X designates the presence of a significant interaction.
To state the unsurprising results first, if we decrease the sample size we are more likely to obtain no significant result. If we increase the sample size we are more likely to obtain the true model ABX. Because interaction requires large sample size to reach significance for medium sample size AB is more likely than the true model ABX. Furthermore, funny things happen if we make main effects the exclusive focus of our hypothesis. In the cases A,B and AB we can find a small-to-medium sample size that is optimal if we want to get our hypothesis significant. All this can be (unconsciously) exploited by researchers to provide more power for their favored pattern.
It is not difficult to see the applications. We could look up the frequency of various patterns in the psychological literature. This could be done in terms of the reported findings but also in terms of the reported hypotheses. We can also ask whether the reported sample size correlates with the optimal sample size.
Note that there is nothing wrong with Anova. The purpose of Anova is NOT to provide a test for composite hypotheses such as X, AB or ABX. Rather it helps us discover sources of variability that can then be subjected to a more focused analysis. Anova is an exploratory technique and should not be used for evaluating hypotheses.
| github_jupyter |
```
def get_Earth_temp(c20,T,cloud_re):
    """Single-step energy-balance estimate of atmospheric warming.

    Parameters
    ----------
    c20 : float
        CO2 concentration in ppm (the name reads like a typo for "co2").
    T : float
        Mean ocean temperature, fed to the ocean-emission submodel.
    cloud_re : float
        Combined cloud/particulate shortwave reflection coefficient.

    Returns
    -------
    tuple
        (temp, a, reflect_factor): temperature increment from retained
        atmospheric heat, an updated cloud-reflection coefficient, and the
        input reflection factor echoed back.
    """
    totall_cloudAndparticle_reflectfactor=cloud_re
    ocean_t=T
    totall_h20=0.0025
    # CO2 is given in ppm
    totall_co2=c20
    totall_co2=totall_co2*ma.pow(10,-6)
    totall_radiation=1.7*10**17
    # Clouds/particulates: shortwave is reflected, longwave absorbed, each
    # with a fixed proportionality coefficient.
    Longwave_absorptionCoeff_h20=0.8
    def get_co2_coff(n):
        # Empirical CO2 longwave-absorption curve.
        return ma.log(1.2*n+0.005*ma.pow(n,2)+1.6*ma.pow(10,-6)*ma.pow(n,3))
    # NOTE(review): c20 is in ppm here but the baseline argument is the
    # *fraction* 291.4e-6 — the two calls use different units; confirm.
    Longwave_absorptionCoeff_c02=(get_co2_coff(c20)-get_co2_coff(291.4*(ma.pow(10,-6))))*3.3
    Shortwave_absorptionCoeff=0.15
    Longwave_absorptionCoeff_air=0
    # Surface areas (arbitrary units): land/ocean split by ice cover.
    land_noIce=103
    land_Ice=46
    ocean_noIce=335
    ocean_Ice=26
    h20_climate_changefactor=0.08
    c20_climate_changefactor=0.1
    Earth_R=6378.137*(10**3)
    Earth_S=ma.pi*Earth_R**2
    Earth_C=Earth_S/0.67
    M_Atmosphere=6000*10**15
    # Radiation reaching the surface (after atmospheric shortwave
    # absorption and cloud/particulate reflection).
    Earth_surface_radiation=totall_radiation*(1-Shortwave_absorptionCoeff-totall_cloudAndparticle_reflectfactor)
    # Total radiation received by land and by ocean (30/70 split).
    land_radiation=0.3*Earth_surface_radiation
    ocean_radiation=0.7*Earth_surface_radiation
    # Daytime reflection from land and ocean, weighted by ice cover
    # (ice has the much higher albedo).
    land_noIce_factor=land_noIce/(land_noIce+land_Ice)
    ocean_noIce_factor=ocean_noIce/(ocean_noIce+ocean_Ice)
    radiation_landReflect=land_radiation*(land_noIce_factor*0.18+(1-land_noIce_factor)*0.85)
    radiation_oceanReflect=ocean_radiation*(ocean_noIce_factor*0.07+(1-ocean_noIce_factor)*0.7)
    radiation_Reflect_daytime=radiation_landReflect+radiation_oceanReflect
    # Daytime outgoing radiation absorbed by the atmosphere (shortwave).
    radiation_out_AtmosphereAbsorb_daytime=(totall_radiation+radiation_Reflect_daytime)*Shortwave_absorptionCoeff
#     radiation_out_totall_daytime=radiation_totall_daytime*(1-0.15)
    # Night: land/ocean radiation absorbed on its way out through the
    # atmosphere, including the cloud contribution.
    land_absorb_radiation=land_radiation-radiation_landReflect
    ocean_absorb_radiation=ocean_radiation-radiation_oceanReflect
    AtmosphereAbsorbFactor_night=(totall_h20*Longwave_absorptionCoeff_h20*h20_climate_changefactor+totall_co2*Longwave_absorptionCoeff_c02*c20_climate_changefactor+(1-totall_h20-totall_co2)*Longwave_absorptionCoeff_air)
    # Land absorption is set by atmospheric composition; the ocean instead
    # re-radiates its stored heat to the atmosphere.
    def get_ocean_Ra(t,x):
        # t: mean ocean temperature; x: total radiation absorbed by the ocean.
        # Returns the radiation the ocean emits back.
        # NOTE(review): t-13.1/13.1 evaluates as t - 1 by operator
        # precedence; (t-13.1)/13.1 may have been intended — confirm.
        factor=0.13*ma.log(1+1.5*pow(t-13.1/13.1,2))+0.32
        q_cloud=x*factor
        return q_cloud
    radiation_out_AtmosphereAbsorb_night=get_ocean_Ra(ocean_t,ocean_absorb_radiation)+land_absorb_radiation
#     ocean_absorb_keep=radiation_out_AtmosphereAbsorb_night*0.7*0.5
    # Heat retained by the atmosphere from both day and night absorption.
    Atmosphere_absorb_keep=(radiation_out_AtmosphereAbsorb_daytime+radiation_out_AtmosphereAbsorb_night)*AtmosphereAbsorbFactor_night
    # Night: heat escaping directly to space.
    radiation_landdirct_out_space=(land_absorb_radiation+ocean_absorb_radiation)*(1-AtmosphereAbsorbFactor_night)
    radiation_out_space=radiation_out_AtmosphereAbsorb_night*0.5
    # Total heat loss
#     radiation_out_totall=radiation_landdirct_out_space+radiation_out_space+radiation_out_totall_daytime+radiation_in_AtmosphereReflect
    # Temperature increment = retained heat / heat capacity proxy.
    temp=Atmosphere_absorb_keep/Earth_C
    def get_cloud_reflect(t,x):
        # t: mean ocean temperature; x: total radiation absorbed by the ocean.
        # Returns the updated total cloud reflection coefficient, via the
        # latent heat of vaporisation (linear in t) and the evaporated mass.
        w=(2501.0-2382.5)/50
        qi=(2501-t*w)
        m0=44.8*ma.pow(10,12)
        u=2*0.2*0.51*x/(qi*m0)
        return u
    a=get_cloud_reflect(ocean_t,ocean_absorb_radiation)
#     print('CO2 climate coefficient: %f'%(ma.pow(10,-6)*Longwave_absorptionCoeff_c02*c20_climate_changefactor))
#     print('Atmospheric absorption coefficient: %f'%AtmosphereAbsorbFactor_night)
    return temp,a,totall_cloudAndparticle_reflectfactor
# Fix: the original cell used `data`, `plt` and (via get_Earth_temp) `ma`
# *before* importing/loading them further down, so it could not run top to
# bottom. Imports and the data load now come first; everything else is
# unchanged.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math as ma

data=pd.read_excel(r'E:\研究生数学建模\prediction_data.xlsx')
data.head()

year_team=[]   # modelled temperature increments, one per year
year_T=[]      # updated cloud-reflection factors, one per year
s=0
co2=data['co2']
T=data['sst']
# Run the energy-balance model once per (CO2, sea-surface-temperature) pair,
# with a fixed initial cloud reflection factor of 0.15.
for n,m in zip(co2,T):
    temps,cloud_fa,col=get_Earth_temp(n,m,0.15)
    year_team.append(temps)
    year_T.append(cloud_fa)
plt.figure(figsize=(8,8))
plt.plot(year_team)

# Persist the modelled series, indexed by year, next to the input workbook.
prediction=pd.DataFrame({'Team':year_team})
prediction=pd.concat([prediction,data['year']],axis=1)
prediction=prediction.set_index('year')
prediction.to_excel('E:\研究生数学建模\prediction_result.xlsx')
```
| github_jupyter |
# Fine-tuning and deploying ProtBert Model for Protein Classification using Amazon SageMaker
## Contents
1. [Motivation](#Motivation)
2. [What is ProtBert?](#What-is-ProtBert?)
3. [Notebook Overview](#Notebook-Overview)
- [Setup](#Setup)
4. [Dataset](#Dataset)
- [Download Data](#Download-Data)
5. [Data Exploration](#Data-Exploration)
- [Upload Data to S3](#Upload-Data-to-S3)
6. [Training script](#Training-script)
7. [Train on Amazon SageMaker](#Train-on-Amazon-SageMaker)
8. [Deploy the Model on Amazon SageMaker](#Deploy-the-model-on-Amazon-SageMaker)
- [Create a model object](#Create-a-model-object)
- [Deploy the model on an endpoint](#Deploy-the-model-on-an-endpoint)
9. [Predicting SubCellular Localization of Protein Sequences](#Predicting-SubCellular-Localization-of-Protein-Sequences)
10. [References](#References)
---
## Motivation
<img src="https://upload.wikimedia.org/wikipedia/commons/6/60/Myoglobin.png"
alt="Protein Sequence"
style="float: left;"
height = 100
width = 250/>
**Proteins** are the key fundamental macromolecules governing in biological bodies. The study of protein localization is important to comprehend the function of protein and has great importance for drug design and other applications. It also plays an important role in characterizing the cellular function of hypothetical and newly discovered proteins [1]. There are several research endeavours that aim to localize whole proteomes by using high-throughput approaches [2–4]. These large datasets provide important information about protein function, and more generally global cellular processes. However, they currently do not achieve 100% coverage of proteomes, and the methodology used can in some cases cause mislocalization of subsets of proteins [5,6]. Therefore, complementary methods are necessary to address these problems. In this notebook, we will leverage Natural Language Processing (NLP) techniques for protein sequence classification. The idea is to interpret protein sequences as sentences and their constituent – amino acids –
as single words [7]. More specifically we will fine tune Pytorch ProtBert model from Hugging Face library.
## What is ProtBert?
ProtBert is a pretrained model on protein sequences using a masked language modeling (MLM) objective. It is based on Bert model which is pretrained on a large corpus of protein sequences in a self-supervised fashion. This means it was pretrained on the raw protein sequences only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those protein sequences [8]. For more information about ProtBert, see [`ProtTrans: Towards Cracking the Language of Life’s Code Through Self-Supervised Deep Learning and High Performance Computing`](https://www.biorxiv.org/content/10.1101/2020.07.12.199554v2.full).
---
## Notebook Overview
This example notebook focuses on fine-tuning the Pytorch ProtBert model and deploying it using Amazon SageMaker, which is the most comprehensive and fully managed machine learning service. With SageMaker, data scientists and developers can quickly and easily build and train machine learning models, and then directly deploy them into a production-ready hosted environment.
During the training, we will leverage SageMaker distributed data parallel (SDP) feature which extends SageMaker’s training capabilities on deep learning models with near-linear scaling efficiency, achieving fast time-to-train with minimal code changes.
_**Note**_: Please select the Kernel as ` Python 3 (Pytorch 1.6 Python 3.6 CPU Optimized)`.
---
### Setup
To start, we import some Python libraries and initialize a SageMaker session, S3 bucket and prefix, and IAM role.
```
!pip install --upgrade pip -q
!pip install -U boto3 sagemaker -q
!pip install seaborn -q
```
Next let us import the common libraries needed for the operations done later.
```
import re
import json
import pandas as pd
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import numpy as np
import pandas as pd
import sagemaker
import torch
import seaborn as sns
import matplotlib.pyplot as plt
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
```
Next, let's verify the version, create a SageMaker session and get the execution role which is the IAM role arn used to give training and hosting access to your data.
```
import sagemaker
print(sagemaker.__version__)
# SageMaker session plus the IAM execution role that grants training and
# hosting access to the data.
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
```
Now we will specify the S3 bucket and prefix where you will store your training data and model artifacts. This should be within the same region as the Notebook Instance, training, and hosting.
```
# S3 bucket/prefix for training data and model artifacts (must be in the
# same region as the notebook instance, training, and hosting).
bucket = sagemaker_session.default_bucket()
prefix = "sagemaker/DEMO-pytorch-bert"
```
As the last step of setting up the enviroment lets set a value to a random seed so that we can reproduce the same results later.
```
# Fix the random seeds so results are reproducible across runs.
RANDOM_SEED = 43
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
```
---
## Dataset
We are going to use a opensource public dataset of protein sequences available [here](http://www.cbs.dtu.dk/services/DeepLoc-1.0/data.php). The dataset is a `fasta file` composed by header and protein sequence. The header is composed by the accession number from Uniprot, the annotated subcellular localization and possibly a description field indicating if the protein was part of the test set. The subcellular localization includes an additional label, where S indicates soluble, M membrane and U unknown[9].
Sample of the data is as follows :
```
>Q9SMX3 Mitochondrion-M test
MVKGPGLYTEIGKKARDLLYRDYQGDQKFSVTTYSSTGVAITTTGTNKGSLFLGDVATQVKNNNFTADVKVST
DSSLLTTLTFDEPAPGLKVIVQAKLPDHKSGKAEVQYFHDYAGISTSVGFTATPIVNFSGVVGTNGLSLGTDV
AYNTESGNFKHFNAGFNFTKDDLTASLILNDKGEKLNASYYQIVSPSTVVGAEISHNFTTKENAITVGTQHAL
DPLTTVKARVNNAGVANALIQHEWRPKSFFTVSGEVDSKAIDKSAKVGIALALKP
```
A sequence in FASTA format begins with a single-line description, followed by lines of sequence data. The definition line (defline) is distinguished from the sequence data by a greater-than (>) symbol at the beginning. The word following the ">" symbol is the identifier of the sequence, and the rest of the line is the description.
### Download Data
```
!wget http://www.cbs.dtu.dk/services/DeepLoc-1.0/deeploc_data.fasta -P ./data -q
```
Since the data is in fasta format, we can leverage `Bio.SeqIO.FastaIO` library to read the dataset. Let us install the Bio package.
```
!pip install Bio -q
import Bio
```
Using the Bio package we will read the data directly by filtering out the columns that are of interest. We will also add a space seperater between each character in the sequence field which will be useful during model training.
```
def read_fasta(file_path, columns) :
    """Parse a DeepLoc FASTA file into a tidy DataFrame.

    Each defline looks like ``<id> <Location>-<Membrane> [test]``; the
    sequence is re-joined with a space between residues so the BERT
    tokenizer treats each amino acid as a separate token.

    Parameters
    ----------
    file_path : str
        Path of the FASTA file to read.
    columns : list of str
        Column names for the returned DataFrame.

    Returns
    -------
    pandas.DataFrame
        One row per record: id, spaced sequence, its length, location,
        membrane flag, and an is_train indicator (1 = train, 0 = test).
    """
    from Bio.SeqIO.FastaIO import SimpleFastaParser
    # Fix: honour the file_path argument (the original hard-coded the path,
    # silently ignoring whatever the caller passed in).
    with open(file_path) as fasta_file: # Will close handle cleanly
        records = []
        for title, sequence in SimpleFastaParser(fasta_file):
            record = []
            title_splits = title.split(None)
            record.append(title_splits[0]) # First word is ID
            sequence = " ".join(sequence)  # space between every residue
            record.append(sequence)
            record.append(len(sequence))
            location_splits = title_splits[1].split("-")
            record.append(location_splits[0]) # Second word is Location
            record.append(location_splits[1]) # Second word is Membrane
            # A third defline word ("test") marks a held-out record.
            if(len(title_splits) > 2):
                record.append(0)
            else:
                record.append(1)
            records.append(record)
    return pd.DataFrame(records, columns = columns)
# Fix: the file was downloaded to ./data (see the wget cell), not ./tmp.
data = read_fasta("./data/deeploc_data.fasta", columns=["id", "sequence", "sequence_length", "location", "membrane", "is_train"])
data.head()
```
### Data Exploration
Dataset consists of 14K sequences and 6 columns in total. We will only use the following columns during training:
* _**id**_ : Unique identifier given each sequence in the dataset.
* _**sequence**_ : Protein sequence. Each character is seperated by a "space". Will be useful for BERT tokernizer.
* _**sequence_length**_ : Character length of each protein sequence.
* _**location**_ : Classification given each sequence.
* _**is_train**_ : Indicates whether the record be used for training or test. Will be used to seperate the dataset for traning and validation.
First, let's verify if there are any missing values in the dataset.
```
# Column dtypes/row counts, plus a single boolean check for missing values.
data.info()
data.isnull().values.any()
```
As you can see, there are **no** missing values in this dataset.
Second, we will see the number of available classes (subcellular localization), which will be used for protein classification.
```
# Distinct subcellular-localization classes used as classification targets.
unique_classes = data.location.unique()
print("Number of classes: ", len(unique_classes))
unique_classes
```
We can see that there are 10 unique classes in the dataset.
Third, lets check the sequence length.
```
%matplotlib inline
%config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
# Sequence-length distribution; x-axis clipped at 3000 for readability.
# NOTE(review): sns.distplot is deprecated in newer seaborn (histplot /
# displot replace it) — confirm the installed version still supports it.
ax = sns.distplot(data['sequence_length'].values)
ax.set_xlim(0, 3000)
plt.title(f'sequence length distribution')
plt.grid(True)
```
This is an important observation as PROTBERT model receives a fixed length of sentence as input. Usually the maximum length of a sentence depends on the data we are working on. For sentences that are shorter than this maximum length, we will have to add paddings (empty tokens) to the sentences to make up the length.
As you can see from the above plot that most of the sequences lie under the length of around 1500, therefore, its a good idea to select the `max_length = 1536` but that will increase the training time for this sample notebook, therefore, we will use `max_length = 512`. You can experiment it with the bigger length and it does improves the accuracy as most of the subcellular localization information of protiens is stored at the end of the sequence.
Next let's factorize the protein classes.
```
# Encode the location labels as integer category codes; keep the
# code -> name mapping for later reporting.
categories = data.location.astype('category').cat
data['location'] = categories.codes
class_names = categories.categories
num_classes = len(class_names)
print(class_names)
```
Next, let's devide the dataset into training and test. We can leverage the `is_train` column to do the split.
```
# The dataset ships with its own split; is_train == 1 marks training rows.
df_train = data[data.is_train == 1]
df_train = df_train.drop(["is_train"], axis = 1)
df_train.shape[0]
df_test = data[data.is_train == 0]
df_test = df_test.drop(["is_train"], axis = 1)
df_test.shape[0]
```
We got **11231** records as training set and **2773** records as the test set which is about 75:25 data split between the train and test. Also, the composition between multiple classes remains uniform between both datasets.
### Upload Data to S3
In order to accommodate model training on SageMaker we need to upload the data to an S3 location. We are going to use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value identifies the location, which we will use later when we start the training job.
```
# Persist the splits locally, then upload them to S3 for SageMaker training.
train_dataset_path = './data/deeploc_per_protein_train.csv'
test_dataset_path = './data/deeploc_per_protein_test.csv'
df_train.to_csv(train_dataset_path)
df_test.to_csv(test_dataset_path)
# upload_data returns the S3 URI; these feed the estimator.fit() channels later.
inputs_train = sagemaker_session.upload_data(train_dataset_path, bucket=bucket, key_prefix=prefix)
inputs_test = sagemaker_session.upload_data(test_dataset_path, bucket=bucket, key_prefix=prefix)
print("S3 location for training data: ", inputs_train )
print("S3 location for testing data: ", inputs_test )
```
## Training script
We use the [PyTorch-Transformers library](https://pytorch.org/hub/huggingface_pytorch-transformers), which contains PyTorch implementations and pre-trained model weights for many NLP models, including BERT. As mentioned above, we will use `ProtBert model` which is pre-trained on protein sequences.
Our training script should save model artifacts learned during training to a file path called `model_dir`, as stipulated by the SageMaker PyTorch image. Upon completion of training, model artifacts saved in `model_dir` will be uploaded to S3 by SageMaker and will be used for deployment.
We save this script in a file named `train.py`, and put the file in a directory named `code/`. The full training script can be viewed under `code/`.
It also has the code required for distributed data parallel (DDP) training using SMDataParallel. It is very similar to a PyTorch training script you might run outside of SageMaker, but modified to run with SMDataParallel, which is a new capability in Amazon SageMaker to train deep learning models faster and cheaper. SMDataParallel's PyTorch client provides an alternative to PyTorch's native DDP. For details about how to use SMDataParallel's DDP in your native PyTorch script, see the [Getting Started with SMDataParallel tutorials](https://docs.aws.amazon.com/sagemaker/latest/dg/distributed-training.html#distributed-training-get-started).
```
!pygmentize code/train.py
```
### Train on Amazon SageMaker
We use Amazon SageMaker to train and deploy a model using our custom PyTorch code. The Amazon SageMaker Python SDK makes it easier to run a PyTorch script in Amazon SageMaker using its PyTorch estimator. After that, we can use the SageMaker Python SDK to deploy the trained model and run predictions. For more information on how to use this SDK with PyTorch, see [the SageMaker Python SDK documentation](https://sagemaker.readthedocs.io/en/stable/using_pytorch.html).
To start, we use the `PyTorch` estimator class to train our model. When creating our estimator, we make sure to specify a few things:
* `entry_point`: the name of our PyTorch script. It contains our training script, which loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model. It also contains code to load and run the model during inference.
* `source_dir`: the location of our training scripts and requirements.txt file. "requirements.txt" lists packages you want to use with your script.
* `framework_version`: the PyTorch version we want to use.
The PyTorch estimator supports both single-machine & multi-machine, distributed PyTorch training using SMDataParallel. _Our training script supports distributed training for only GPU instances_.
#### Instance types
SMDataParallel supports model training on SageMaker with the following instance types only:
- ml.p3.16xlarge
- ml.p3dn.24xlarge [Recommended]
- ml.p4d.24xlarge [Recommended]
#### Instance count
To get the best performance and the most out of SMDataParallel, you should use at least 2 instances, but you can also use 1 for testing this example.
#### Distribution strategy
Note that to use DDP mode, you update the distribution strategy, and set it to use smdistributed dataparallel.
After creating the estimator, we then call fit(), which launches a training job. We use the Amazon S3 URIs where we uploaded the training data earlier.
```
# Training job will take around 20-25 mins to execute.
from sagemaker.pytorch import PyTorch
# Timestamped job name to avoid collisions with earlier runs.
TRAINING_JOB_NAME="protbert-training-pytorch-{}".format(time.strftime("%m-%d-%Y-%H-%M-%S"))
print('Training job name: ', TRAINING_JOB_NAME)
estimator = PyTorch(
entry_point="train.py",
source_dir="code",
role=role,
framework_version="1.6.0",
py_version="py36",
instance_count=1, # this script support distributed training for only GPU instances.
instance_type="ml.p3.16xlarge",
# Enable SMDataParallel distributed data-parallel training.
distribution={'smdistributed':{
'dataparallel':{
'enabled': True
}
}
},
debugger_hook_config=False,
# Hyperparameters are passed to train.py as command-line arguments.
hyperparameters={
"epochs": 3,
"num_labels": num_classes,
"batch-size": 4,
"test-batch-size": 4,
"log-interval": 100,
# Presumably the number of ProtBert layers left frozen during
# fine-tuning -- verify against code/train.py.
"frozen_layers": 15,
},
# Regexes scrape metric values out of the training logs into SageMaker metrics.
metric_definitions=[
{'Name': 'train:loss', 'Regex': 'Training Loss: ([0-9\\.]+)'},
{'Name': 'test:accuracy', 'Regex': 'Validation Accuracy: ([0-9\\.]+)'},
{'Name': 'test:loss', 'Regex': 'Validation loss: ([0-9\\.]+)'},
]
)
estimator.fit({"training": inputs_train, "testing": inputs_test}, job_name=TRAINING_JOB_NAME)
```
With `max_length=512` and running the model for only 3 epochs we get a validation accuracy of around 65%, which is pretty decent. You can optimize it further by trying a bigger sequence length, increasing the number of epochs and tuning other hyperparameters. For details you can refer to the research paper:
[`ProtTrans: Towards Cracking the Language of Life’s Code Through Self-Supervised Deep Learning and High Performance Computing`](https://arxiv.org/pdf/2007.06225.pdf).
Before we deploy the model to an endpoint, let's first store the model to S3.
```
# Persist the S3 model artifact URI across notebook sessions via %store.
model_data = estimator.model_data
print("Storing {} as model_data".format(model_data))
%store model_data
%store -r model_data
# If no model was found, set it manually here.
# model_data = 's3://sagemaker-{region}-XXX/protbert-training-pytorch-XX-XX-XXXX-XX-XX-XX/output/model.tar.gz'
print("Using this model: {}".format(model_data))
```
## Deploy the model on Amazon SageMaker
After training our model, we host it on an Amazon SageMaker Endpoint. To make the endpoint load the model and serve predictions, we implement a few methods in inference.py.
- `model_fn()`: function defined to load the saved model and return a model object that can be used for model serving. The SageMaker PyTorch model server loads our model by invoking model_fn.
- `input_fn()`: deserializes and prepares the prediction input. In this example, our request body is first serialized to JSON and then sent to model serving endpoint. Therefore, in input_fn(), we first deserialize the JSON-formatted request body and return the input as a torch.tensor, as required for BERT.
- `predict_fn()`: performs the prediction and returns the result.
To deploy our endpoint, we call deploy() on our PyTorch estimator object, passing in our desired number of instances and instance type:
### Create a model object
You define the model object by using SageMaker SDK's PyTorchModel and pass in the model from the estimator and the entry_point. The function loads the model and sets it to use a GPU, if available.
```
import sagemaker
from sagemaker.pytorch import PyTorchModel
# Timestamped endpoint name to avoid collisions with earlier deployments.
ENDPOINT_NAME = "protbert-inference-pytorch-1-{}".format(time.strftime("%m-%d-%Y-%H-%M-%S"))
print("Endpoint name: ", ENDPOINT_NAME)
# Wrap the trained artifact with the inference handlers in code/inference.py.
model = PyTorchModel(model_data=model_data, source_dir='code',
entry_point='inference.py', role=role, framework_version='1.6.0', py_version='py3')
```
### Deploy the model on an endpoint
You create a predictor by using the model.deploy function. You can optionally change both the instance count and instance type.
```
%%time
# Deploying a real-time endpoint typically takes several minutes.
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m5.2xlarge', endpoint_name=ENDPOINT_NAME)
```
## Predicting SubCellular Localization of Protein Sequences
```
import boto3
# Two clients: `runtime` invokes endpoints; `client` manages/describes them.
runtime= boto3.client('runtime.sagemaker')
client = boto3.client('sagemaker')
endpoint_desc = client.describe_endpoint(EndpointName=ENDPOINT_NAME)
print(endpoint_desc)
print('---'*30)
```
We then configure the predictor to use application/json for the content type when sending requests to our endpoint:
```
# Send and receive JSON payloads when calling the endpoint.
predictor.serializer = sagemaker.serializers.JSONSerializer()
predictor.deserializer = sagemaker.deserializers.JSONDeserializer()
```
Finally, we use the returned predictor object to call the endpoint:
```
# Space-separated amino-acid sequences -- presumably the format the ProtBert
# tokenizer expects (one token per residue); verify against code/inference.py.
protein_sequence = 'M G K K D A S T T R T P V D Q Y R K Q I G R Q D Y K K N K P V L K A T R L K A E A K K A A I G I K E V I L V T I A I L V L L F A F Y A F F F L N L T K T D I Y E D S N N'
prediction = predictor.predict(protein_sequence)
print(prediction)
print(f'Protein Sequence: {protein_sequence}')
# class_names maps the returned integer code back to a localization label.
print("Sequence Localization Ground Truth is: {} - prediction is: {}".format('Endoplasmic.reticulum', class_names[prediction[0]]))
protein_sequence = 'M S M T I L P L E L I D K C I G S N L W V I M K S E R E F A G T L V G F D D Y V N I V L K D V T E Y D T V T G V T E K H S E M L L N G N G M C M L I P G G K P E'
prediction = predictor.predict(protein_sequence)
print(prediction)
print(f'Protein Sequence: {protein_sequence}')
print("Sequence Localization Ground Truth is: {} - prediction is: {}".format('Nucleus', class_names[prediction[0]]))
seq = 'M G G P T R R H Q E E G S A E C L G G P S T R A A P G P G L R D F H F T T A G P S K A D R L G D A A Q I H R E R M R P V Q C G D G S G E R V F L Q S P G S I G T L Y I R L D L N S Q R S T C C C L L N A G T K G M C'
prediction = predictor.predict(seq)
print(prediction)
print(f'Protein Sequence: {seq}')
print("Sequence Localization Ground Truth is: {} - prediction is: {}".format('Cytoplasm',class_names[prediction[0]]))
```
# Cleanup
Lastly, please remember to delete the Amazon SageMaker endpoint to avoid charges:
```
predictor.delete_endpoint()
```
## References
- [1] Refining Protein Subcellular Localization (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1289393/)
- [2] Kumar A, Agarwal S, Heyman JA, Matson S, Heidtman M, et al. Subcellular localization of the yeast proteome. Genes Dev. 2002;16:707–719. [PMC free article] [PubMed] [Google Scholar]
- [3] Huh WK, Falvo JV, Gerke LC, Carroll AS, Howson RW, et al. Global analysis of protein localization in budding yeast. Nature. 2003;425:686–691. [PubMed] [Google Scholar]
- [4] Wiemann S, Arlt D, Huber W, Wellenreuther R, Schleeger S, et al. From ORFeome to biology: A functional genomics pipeline. Genome Res. 2004;14:2136–2144. [PMC free article] [PubMed] [Google Scholar]
- [5] Davis TN. Protein localization in proteomics. Curr Opin Chem Biol. 2004;8:49–53. [PubMed] [Google Scholar]
- [6] Scott MS, Thomas DY, Hallett MT. Predicting subcellular localization via protein motif co-occurrence. Genome Res. 2004;14:1957–1966. [PMC free article] [PubMed] [Google Scholar]
- [7] ProtTrans: Towards Cracking the Language of Life's Code Through Self-Supervised Deep Learning and High Performance Computing (https://www.biorxiv.org/content/10.1101/2020.07.12.199554v2.full.pdf)
- [8] ProtBert Hugging Face (https://huggingface.co/Rostlab/prot_bert)
- [9] DeepLoc-1.0: Eukaryotic protein subcellular localization predictor (http://www.cbs.dtu.dk/services/DeepLoc-1.0/data.php)
| github_jupyter |
# The Autodiff Cookbook
[](https://colab.sandbox.google.com/github/google/jax/blob/master/docs/notebooks/autodiff_cookbook.ipynb)
*alexbw@, mattjj@*
JAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics.
```
import jax.numpy as jnp
from jax import grad, jit, vmap
from jax import random
# Root PRNG key; split (never reuse) before each random draw below.
key = random.PRNGKey(0)
```
## Gradients
### Starting with `grad`
You can differentiate a function with `grad`:
```
# grad(f) returns a new function evaluating df/dx; here tanh'(2.0).
grad_tanh = grad(jnp.tanh)
print(grad_tanh(2.0))
```
`grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\nabla f$. That means `grad(f)(x)` represents the value $\nabla f(x)$.
Since `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like:
```
# Higher-order derivatives by composing grad with itself (2nd and 3rd derivative).
print(grad(grad(jnp.tanh))(2.0))
print(grad(grad(grad(jnp.tanh)))(2.0))
```
Let's look at computing gradients with `grad` in a linear logistic regression model. First, the setup:
```
def sigmoid(x):
    """Logistic sigmoid 1/(1+exp(-x)), written via tanh for numerical stability."""
    shifted = jnp.tanh(x / 2)
    return 0.5 * (shifted + 1)
# Outputs probability of a label being true.
def predict(W, b, inputs):
    """Probability that each row of `inputs` has a true label under the linear model."""
    logits = jnp.dot(inputs, W) + b
    return sigmoid(logits)
# Build a toy dataset.
# Four examples with three features each.
inputs = jnp.array([[0.52, 1.12, 0.77],
[0.88, -1.08, 0.15],
[0.52, 0.06, -1.30],
[0.74, -2.49, 1.39]])
# Booleans participate in arithmetic below as 0.0 / 1.0.
targets = jnp.array([True, True, False, True])
# Training loss is the negative log-likelihood of the training examples.
def loss(W, b):
    """Negative log-likelihood of the training examples under the logistic model."""
    probs = predict(W, b, inputs)
    # Probability assigned to the correct label for each example.
    correct_probs = probs * targets + (1 - probs) * (1 - targets)
    return -jnp.sum(jnp.log(correct_probs))
# Initialize random model coefficients
# Split the key three ways so W and b get independent draws.
key, W_key, b_key = random.split(key, 3)
W = random.normal(W_key, (3,))
b = random.normal(b_key, ())
```
Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments.
```
# `argnums` selects which positional argument(s) to differentiate with respect to.
# Differentiate `loss` with respect to the first positional argument:
W_grad = grad(loss, argnums=0)(W, b)
print('W_grad', W_grad)
# Since argnums=0 is the default, this does the same thing:
W_grad = grad(loss)(W, b)
print('W_grad', W_grad)
# But we can choose different values too, and drop the keyword:
b_grad = grad(loss, 1)(W, b)
print('b_grad', b_grad)
# Including tuple values
# A tuple of argnums returns a matching tuple of gradients.
W_grad, b_grad = grad(loss, (0, 1))(W, b)
print('W_grad', W_grad)
print('b_grad', b_grad)
```
This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the "Prologue" section of *Functional Differential Geometry* for a defense of this notation.
Essentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\partial_i f$.
### Differentiating with respect to nested lists, tuples, and dicts
Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like.
```
def loss2(params_dict):
    """Same NLL loss as `loss`, but with parameters packed in a dict."""
    probs = predict(params_dict['W'], params_dict['b'], inputs)
    correct_probs = probs * targets + (1 - probs) * (1 - targets)
    return -jnp.sum(jnp.log(correct_probs))
print(grad(loss2)({'W': W, 'b': b}))
```
You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.).
### Evaluate a function and its gradient using `value_and_grad`
Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value:
```
from jax import value_and_grad
# One pass returns both loss(W, b) and its gradient w.r.t. (W, b).
loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)
print('loss value', loss_value)
# Recomputing directly should print the same value.
print('loss value', loss(W, b))
```
### Checking against numerical differences
A great thing about derivatives is that they're straightforward to check with finite differences:
```
# Set a step size for finite differences calculations
eps = 1e-4
# Check b_grad with scalar finite differences
# Central difference: (f(b + eps/2) - f(b - eps/2)) / eps.
b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps
print('b_grad_numerical', b_grad_numerical)
print('b_grad_autodiff', grad(loss, 1)(W, b))
# Check W_grad with finite differences in a random direction
key, subkey = random.split(key)
vec = random.normal(subkey, W.shape)
# Normalize so the step is a unit-length directional perturbation.
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
W_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps
print('W_dirderiv_numerical', W_grad_numerical)
# Directional derivative = gradient dotted with the unit direction.
print('W_dirderiv_autodiff', jnp.vdot(grad(loss)(W, b), unitvec))
```
JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like:
```
from jax.test_util import check_grads
# Compares autodiff against finite differences, up to the given derivative order.
check_grads(loss, (W, b), order=2)  # check up to 2nd order derivatives
```
### Hessian-vector products with `grad`-of-`grad`
One thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.)
A Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).
For a scalar-valued function $f : \mathbb{R}^n \to \mathbb{R}$ with continuous second derivatives (so that the Hessian matrix is symmetric), the Hessian at a point $x \in \mathbb{R}^n$ is written as $\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate
$\qquad v \mapsto \partial^2 f(x) \cdot v$
for any $v \in \mathbb{R}^n$.
The trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store.
Luckily, `grad` already gives us a way to write an efficient Hessian-vector product function. We just have to use the identity
$\qquad \partial^2 f (x) v = \partial [x \mapsto \partial f(x) \cdot v] = \partial g(x)$,
where $g(x) = \partial f(x) \cdot v$ is a new scalar-valued function that dots the gradient of $f$ at $x$ with the vector $v$. Notice that we're only ever differentiating scalar-valued functions of vector-valued arguments, which is exactly where we know `grad` is efficient.
In JAX code, we can just write this:
```
def hvp(f, x, v):
    """Hessian-vector product of scalar `f` at `x` with `v` (pure reverse-mode).

    Computes grad of g(x) = <grad(f)(x), v>, which equals H(x) @ v.
    """
    directional = lambda y: jnp.vdot(grad(f)(y), v)
    return grad(directional)(x)
```
This example shows that you can freely use lexical closure, and JAX will never get perturbed or confused.
We'll check this implementation a few cells down, once we see how to compute dense Hessian matrices. We'll also write an even better version that uses both forward-mode and reverse-mode.
### Jacobians and Hessians using `jacfwd` and `jacrev`
You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions:
```
from jax import jacfwd, jacrev
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
# Forward-mode Jacobian: built one column per input dimension.
J = jacfwd(f)(W)
print("jacfwd result, with shape", J.shape)
print(J)
# Reverse-mode Jacobian: built one row per output dimension; same values.
J = jacrev(f)(W)
print("jacrev result, with shape", J.shape)
print(J)
```
These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for "tall" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for "wide" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`.
You can also use `jacfwd` and `jacrev` with container types:
```
def predict_dict(params, inputs):
    """Like `predict`, but with W and b packed into a params dict."""
    W, b = params['W'], params['b']
    return predict(W, b, inputs)
# The Jacobian comes back as a dict mirroring the params structure.
J_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs)
for k, v in J_dict.items():
print("Jacobian from {} to logits is".format(k))
print(v)
```
For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on!
Using a composition of two of these functions gives us a way to compute dense Hessian matrices:
```
def hessian(f):
    """Return a function computing the dense Hessian of `f`.

    Forward-over-reverse composition: reverse-mode for the (wide) gradient,
    forward-mode for the (square) outer Jacobian.
    """
    gradient_fn = jacrev(f)
    return jacfwd(gradient_fn)
# f maps R^3 -> R^4 here, so the Hessian tensor has shape (4, 3, 3).
H = hessian(f)(W)
print("hessian, with shape", H.shape)
print(H)
```
This shape makes sense: if we start with a function $f : \mathbb{R}^n \to \mathbb{R}^m$, then at a point $x \in \mathbb{R}^n$ we expect to get the shapes
* $f(x) \in \mathbb{R}^m$, the value of $f$ at $x$,
* $\partial f(x) \in \mathbb{R}^{m \times n}$, the Jacobian matrix at $x$,
* $\partial^2 f(x) \in \mathbb{R}^{m \times n \times n}$, the Hessian at $x$,
and so on.
To implement `hessian`, we could have used `jacfwd(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. That's because in the inner Jacobian computation we're often differentiating a function with a wide Jacobian (maybe like a loss function $f : \mathbb{R}^n \to \mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$), which is where forward-mode wins out.
## How it's made: two foundational autodiff functions
### Jacobian-Vector products (JVPs, aka forward-mode autodiff)
JAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background.
#### JVPs in math
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}^m$, the Jacobian of $f$ evaluated at an input point $x \in \mathbb{R}^n$, denoted $\partial f(x)$, is often thought of as a matrix in $\mathbb{R}^m \times \mathbb{R}^n$:
$\qquad \partial f(x) \in \mathbb{R}^{m \times n}$.
But we can also think of $\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\mathbb{R}^m$):
$\qquad \partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$.
This map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis.
If we don't commit to one specific input point $x$, then we can think of the function $\partial f$ as first taking an input point and returning the Jacobian linear map at that input point:
$\qquad \partial f : \mathbb{R}^n \to \mathbb{R}^n \to \mathbb{R}^m$.
In particular, we can uncurry things so that given input point $x \in \mathbb{R}^n$ and a tangent vector $v \in \mathbb{R}^n$, we get back an output tangent vector in $\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as
$\qquad (x, v) \mapsto \partial f(x) v$
#### JVPs in JAX code
Back in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \mapsto (f(x), \partial f(x) v)$.
```
from jax import jvp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
key, subkey = random.split(key)
v = random.normal(subkey, W.shape)
# Push forward the vector `v` along `f` evaluated at `W`
# y = f(W) and u = df(W) v, computed together in one forward pass.
y, u = jvp(f, (W,), (v,))
```
In terms of Haskell-like type signatures, we could write
```haskell
jvp :: (a -> b) -> a -> T a -> (b, T b)
```
where we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`.
The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a "JVP rule" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values.
That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 3x the cost of just evaluating the function (one unit of work for evaluating the original function, for example `sin(x)`; one unit for linearizing, like `cos(x)`; and one unit for applying the linearized function to a vector, like `cos_x * v`). Put another way, for a fixed primal point $x$, we can evaluate $v \mapsto \partial f(x) \cdot v$ for about the same marginal cost as evaluating $f$.
That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning?
To answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with "tall" Jacobians, but inefficient for "wide" Jacobians.
If you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\mathbb{R}^n$ to a scalar loss value in $\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\partial f(x) \in \mathbb{R}^{1 \times n}$, which we often identify with the Gradient vector $\nabla f(x) \in \mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale.
To do better for functions like this, we just need to use reverse-mode.
### Vector-Jacobian products (VJPs, aka reverse-mode autodiff)
Where forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time.
#### VJPs in math
Let's again consider a function $f : \mathbb{R}^n \to \mathbb{R}^m$.
Starting from our notation for JVPs, the notation for VJPs is pretty simple:
$\qquad (x, v) \mapsto v \partial f(x)$,
where $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \mathbb{R}^m \to \mathbb{R}$, and when we write $v \partial f(x)$ we mean function composition $v \circ \partial f(x)$, where the types work out because $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\mathbb{R}^m$ and use the two almost interchangeably, just like we might sometimes flip between "column vectors" and "row vectors" without much comment.
With that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP:
$\qquad (x, v) \mapsto \partial f(x)^\mathsf{T} v$.
For a given point $x$, we can write the signature as
$\qquad \partial f(x)^\mathsf{T} : \mathbb{R}^m \to \mathbb{R}^n$.
The corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))
of $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function.
#### VJPs in JAX code
Switching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$.
```
from jax import vjp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
# vjp returns f(W) plus a pullback closure over the linearization at W.
y, vjp_fun = vjp(f, W)
key, subkey = random.split(key)
u = random.normal(subkey, y.shape)
# Pull back the covector `u` along `f` evaluated at `W`
# Note: the pullback returns a tuple of cotangents, one per primal argument.
v = vjp_fun(u)
```
In terms of Haskell-like type signatures, we could write
```haskell
vjp :: (a -> b) -> a -> (b, CT b -> CT a)
```
where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`.
This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ is only about three times the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \mathbb{R}^n \to \mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.
There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!).
For more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/).
### Vector-valued gradients with VJPs
If you're interested in taking vector-valued gradients (like `tf.gradients`):
```
from jax import vjp
def vgrad(f, x):
    """Gradient of vector-valued `f` with an all-ones cotangent (tf.gradients-style)."""
    y, pullback = vjp(f, x)
    ones_cotangent = jnp.ones(y.shape)
    return pullback(ones_cotangent)[0]
print(vgrad(lambda x: 3*x**2, jnp.ones((2, 2))))
```
### Hessian-vector products using both forward- and reverse-mode
In a previous section, we implemented a Hessian-vector product function just using reverse-mode (assuming continuous second derivatives):
```
def hvp(f, x, v):
    """Hessian-vector product via reverse-over-reverse (same as the earlier hvp).

    Differentiates the scalar map x -> <grad(f)(x), v>.
    """
    inner = lambda point: jnp.vdot(grad(f)(point), v)
    return grad(inner)(x)
```
That's efficient, but we can do even better and save some memory by using forward-mode together with reverse-mode.
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}$ to differentiate, a point $x \in \mathbb{R}^n$ at which to linearize the function, and a vector $v \in \mathbb{R}^n$, the Hessian-vector product function we want is
$(x, v) \mapsto \partial^2 f(x) v$
Consider the helper function $g : \mathbb{R}^n \to \mathbb{R}^n$ defined to be the derivative (or gradient) of $f$, namely $g(x) = \partial f(x)$. All we need is its JVP, since that will give us
$(x, v) \mapsto \partial g(x) v = \partial^2 f(x) v$.
We can translate that almost directly into code:
```
from jax import jvp, grad
# forward-over-reverse
def hvp(f, primals, tangents):
    """Hessian-vector product via forward-over-reverse: the JVP of grad(f).

    Works for arbitrary pytree primals/tangents; no explicit dot product needed.
    """
    _, tangent_out = jvp(grad(f), primals, tangents)
    return tangent_out
```
Even better, since we didn't have to call `jnp.dot` directly, this `hvp` function works with arrays of any shape and with arbitrary container types (like vectors stored as nested lists/dicts/tuples), and doesn't even have a dependence on `jax.numpy`.
Here's an example of how to use it:
```
def f(X):
    """Scalar test objective: sum of squared tanh over all entries of X."""
    activated = jnp.tanh(X)
    return jnp.sum(activated ** 2)
key, subkey1, subkey2 = random.split(key, 3)
X = random.normal(subkey1, (30, 40))
V = random.normal(subkey2, (30, 40))
# Compare the matrix-free HVP against contracting the dense Hessian with V.
ans1 = hvp(f, (X,), (V,))
ans2 = jnp.tensordot(hessian(f)(X), V, 2)
print(jnp.allclose(ans1, ans2, 1e-4, 1e-4))
```
Another way you might consider writing this is using reverse-over-forward:
```
# reverse-over-forward
def hvp_revfwd(f, primals, tangents):
g = lambda primals: jvp(f, primals, tangents)[1]
return grad(g)(primals)
```
That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best:
```
# reverse-over-reverse, only works for single arguments
def hvp_revrev(f, primals, tangents):
    """Hessian-vector product via reverse-over-reverse.

    Only supports a single array argument (unpacks one primal/tangent).
    """
    x, = primals
    v, = tangents
    scalar_fn = lambda p: jnp.vdot(grad(f)(p), v)
    return grad(scalar_fn)(x)
print("Forward over reverse")
%timeit -n10 -r3 hvp(f, (X,), (V,))
print("Reverse over forward")
%timeit -n10 -r3 hvp_revfwd(f, (X,), (V,))
print("Reverse over reverse")
%timeit -n10 -r3 hvp_revrev(f, (X,), (V,))
print("Naive full Hessian materialization")
%timeit -n10 -r3 jnp.tensordot(hessian(f)(X), V, 2)
```
## Composing VJPs, JVPs, and `vmap`
### Jacobian-Matrix and Matrix-Jacobian products
Now that we have `jvp` and `vjp` transformations that give us functions to push-forward or pull-back single vectors at a time, we can use JAX's `vmap` [transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products.
```
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
# Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`.
# First, use a list comprehension to loop over rows in the matrix M.
def loop_mjp(f, x, M):
    """Matrix-Jacobian product via an explicit Python loop over rows of M."""
    _, pullback = vjp(f, x)
    rows = [pullback(row) for row in M]
    return jnp.vstack(rows)
# Now, use vmap to build a computation that does a single fast matrix-matrix
# multiply, rather than an outer loop over vector-matrix multiplies.
def vmap_mjp(f, x, M):
    """Matrix-Jacobian product in one batched call by vmapping the VJP."""
    _, pullback = vjp(f, x)
    outs, = vmap(pullback)(M)
    return outs
key = random.PRNGKey(0)
num_covecs = 128
U = random.normal(key, (num_covecs,) + y.shape)
loop_vs = loop_mjp(f, W, M=U)
print('Non-vmapped Matrix-Jacobian product')
%timeit -n10 -r3 loop_mjp(f, W, M=U)
print('\nVmapped Matrix-Jacobian product')
vmap_vs = vmap_mjp(f, W, M=U)
%timeit -n10 -r3 vmap_mjp(f, W, M=U)
assert jnp.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical'
def loop_jmp(f, W, M):
    """Jacobian-matrix product via a Python loop: one JVP per row of M."""
    # jvp returns (primal_out, tangent_out); keep only the tangents.
    tangent_outs = [jvp(f, (W,), (row,))[1] for row in M]
    return jnp.vstack(tangent_outs)
def vmap_jmp(f, W, M):
    """Jacobian-matrix product in one batched call by vmapping the JVP."""
    def push(s):
        return jvp(f, (W,), (s,))[1]
    return vmap(push)(M)
num_vecs = 128
S = random.normal(key, (num_vecs,) + W.shape)
loop_vs = loop_jmp(f, W, M=S)
print('Non-vmapped Jacobian-Matrix product')
%timeit -n10 -r3 loop_jmp(f, W, M=S)
vmap_vs = vmap_jmp(f, W, M=S)
print('\nVmapped Jacobian-Matrix product')
%timeit -n10 -r3 vmap_jmp(f, W, M=S)
assert jnp.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical'
```
### The implementation of `jacfwd` and `jacrev`
Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once.
```
from jax import jacrev as builtin_jacrev
def our_jacrev(f):
    """Reverse-mode Jacobian: pull the full standard basis back through f's VJP."""
    def jacfun(x):
        y, pullback = vjp(f, x)
        # vmap over rows of the identity => one VJP per output coordinate,
        # i.e. the whole Jacobian from a single batched call.
        J, = vmap(pullback, in_axes=0)(jnp.eye(len(y)))
        return J
    return jacfun
assert jnp.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!'
from jax import jacfwd as builtin_jacfwd
def our_jacfwd(f):
    """Forward-mode Jacobian: push the full standard basis through f's JVP."""
    def jacfun(x):
        def push(s):
            return jvp(f, (x,), (s,))[1]
        # Columns of the identity are pushed forward; transpose so rows
        # index outputs and columns index inputs, matching jacrev.
        Jt = vmap(push, in_axes=1)(jnp.eye(len(x)))
        return jnp.transpose(Jt)
    return jacfun
assert jnp.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!'
```
Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) of reverse-mode `jacobian` in Autograd had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`.
Another thing that Autograd couldn't do is `jit`. Interestingly, no matter how much Python dynamism you use in your function to be differentiated, we could always use `jit` on the linear part of the computation. For example:
```
# Python-level control flow (even raising/catching exceptions) is fine in a
# function being differentiated: JAX traces whichever path the concrete
# input takes.
def f(x):
    try:
        if x < 3:
            return 2 * x ** 3
        else:
            raise ValueError
    except ValueError:
        # Fallback branch: effectively taken whenever x >= 3.
        return jnp.pi * x
# At x = 4. the ValueError path is traced, so f_vjp is the pullback of the
# pi * x branch; that linear part can always be jit-compiled.
y, f_vjp = vjp(f, 4.)
print(jit(f_vjp)(1.))
```
## Complex numbers and differentiation
JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), it helps to think in terms of JVPs and VJPs.
Consider a complex-to-complex function $f: \mathbb{C} \to \mathbb{C}$ and identify it with a corresponding function $g: \mathbb{R}^2 \to \mathbb{R}^2$,
```
# Illustrative only: u and v stand for the real and imaginary component
# functions of f (they are not defined in this cell).
def f(z):
    x, y = jnp.real(z), jnp.imag(z)
    return u(x, y) + v(x, y) * 1j
# The corresponding R^2 -> R^2 function obtained by identifying C with R^2.
def g(x, y):
    return (u(x, y), v(x, y))
```
That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$, and identified $\mathbb{C}$ with $\mathbb{R}^2$ to get $g$.
Since $g$ only involves real inputs and outputs, we already know how to write a Jacobian-vector product for it, say given a tangent vector $(c, d) \in \mathbb{R}^2$, namely
$\begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix}
\begin{bmatrix} c \\ d \end{bmatrix}$.
To get a JVP for the original function $f$ applied to a tangent vector $c + di \in \mathbb{C}$, we just use the same definition and identify the result as another complex number,
$\partial f(x + y i)(c + d i) =
\begin{matrix} \begin{bmatrix} 1 & i \end{bmatrix} \\ ~ \end{matrix}
\begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix}
\begin{bmatrix} c \\ d \end{bmatrix}$.
That's our definition of the JVP of a $\mathbb{C} \to \mathbb{C}$ function! Notice it doesn't matter whether or not $f$ is holomorphic: the JVP is unambiguous.
Here's a check:
```
def check(seed):
    """Numerically verify JAX's JVP rule for a random linear C -> C function.

    Builds f(z) = u(x, y) + i v(x, y) with random linear u and v, then
    compares jvp(fun) at a random point against the hand-written 2x2 real
    Jacobian applied to a random tangent vector. Prints True on agreement.
    """
    key = random.PRNGKey(seed)
    # random coeffs for u and v
    key, subkey = random.split(key)
    a, b, c, d = random.uniform(subkey, (4,))
    def fun(z):
        x, y = jnp.real(z), jnp.imag(z)
        return u(x, y) + v(x, y) * 1j
    def u(x, y):
        return a * x + b * y
    def v(x, y):
        return c * x + d * y
    # primal point
    key, subkey = random.split(key)
    x, y = random.uniform(subkey, (2,))
    z = x + y * 1j
    # tangent vector — use fresh names so we do not clobber the closed-over
    # coefficients c and d that define v above (the original code shadowed
    # them, silently redefining v halfway through the function)
    key, subkey = random.split(key)
    c_dot, d_dot = random.uniform(subkey, (2,))
    z_dot = c_dot + d_dot * 1j
    # check jvp against the explicit 2x2 Jacobian action
    _, ans = jvp(fun, (z,), (z_dot,))
    expected = (grad(u, 0)(x, y) * c_dot +
                grad(u, 1)(x, y) * d_dot +
                grad(v, 0)(x, y) * c_dot * 1j +
                grad(v, 1)(x, y) * d_dot * 1j)
    print(jnp.allclose(ans, expected))
check(0)
check(1)
check(2)
```
What about VJPs? We do something pretty similar: for a cotangent vector $c + di \in \mathbb{C}$ we define the VJP of $f$ as
$(c + di)^* \; \partial f(x + y i) =
\begin{matrix} \begin{bmatrix} c & -d \end{bmatrix} \\ ~ \end{matrix}
\begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix}
\begin{bmatrix} 1 \\ -i \end{bmatrix}$.
What's with the negatives? They're just to take care of complex conjugation, and the fact that we're working with covectors.
Here's a check of the VJP rules:
```
def check(seed):
    """Numerically verify JAX's VJP rule for a random linear C -> C function.

    Builds f(z) = u(x, y) + i v(x, y) with random linear u and v, then
    compares vjp(fun) at a random point against the hand-written cotangent
    formula. Raises AssertionError on disagreement.
    """
    key = random.PRNGKey(seed)
    # random coeffs for u and v
    key, subkey = random.split(key)
    a, b, c, d = random.uniform(subkey, (4,))
    def fun(z):
        x, y = jnp.real(z), jnp.imag(z)
        return u(x, y) + v(x, y) * 1j
    def u(x, y):
        return a * x + b * y
    def v(x, y):
        return c * x + d * y
    # primal point
    key, subkey = random.split(key)
    x, y = random.uniform(subkey, (2,))
    z = x + y * 1j
    # cotangent vector — fresh names to avoid clobbering the closed-over
    # coefficients c and d used by v (the original code shadowed them)
    key, subkey = random.split(key)
    c_bar, d_bar = random.uniform(subkey, (2,))
    z_bar = jnp.array(c_bar + d_bar * 1j)  # for dtype control
    # check vjp against the explicit conjugated cotangent formula
    _, fun_vjp = vjp(fun, z)
    ans, = fun_vjp(z_bar)
    expected = (grad(u, 0)(x, y) * c_bar +
                grad(v, 0)(x, y) * (-d_bar) +
                grad(u, 1)(x, y) * c_bar * (-1j) +
                grad(v, 1)(x, y) * (-d_bar) * (-1j))
    assert jnp.allclose(ans, expected, atol=1e-5, rtol=1e-5)
check(0)
check(1)
check(2)
```
What about convenience wrappers like `grad`, `jacfwd`, and `jacrev`?
For $\mathbb{R} \to \mathbb{R}$ functions, recall we defined `grad(f)(x)` as being `vjp(f, x)[1](1.0)`, which works because applying a VJP to a `1.0` value reveals the gradient (i.e. Jacobian, or derivative). We can do the same thing for $\mathbb{C} \to \mathbb{R}$ functions: we can still use `1.0` as the cotangent vector, and we just get out a complex number result summarizing the full Jacobian:
```
def f(z):
    """Squared magnitude of z, written as a C -> R function of (x, y)."""
    real_part = jnp.real(z)
    imag_part = jnp.imag(z)
    return real_part ** 2 + imag_part ** 2
z = 3. + 4j
grad(f)(z)
```
For general $\mathbb{C} \to \mathbb{C}$ functions, the Jacobian has 4 real-valued degrees of freedom (as in the 2x2 Jacobian matrices above), so we can't hope to represent all of them within a complex number. But we can for holomorphic functions! A holomorphic function is precisely a $\mathbb{C} \to \mathbb{C}$ function with the special property that its derivative can be represented as a single complex number. (The [Cauchy-Riemann equations](https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations) ensure that the above 2x2 Jacobians have the special form of a scale-and-rotate matrix in the complex plane, i.e. the action of a single complex number under multiplication.) And we can reveal that one complex number using a single call to `vjp` with a covector of `1.0`.
Because this only works for holomorphic functions, to use this trick we need to promise JAX that our function is holomorphic; otherwise, JAX will raise an error when `grad` is used for a complex-output function:
```
def f(z):
    # jnp.sin is holomorphic, so its derivative is a single complex number.
    return jnp.sin(z)
z = 3. + 4j
# holomorphic=True is the promise that disables grad's complex-output error.
grad(f, holomorphic=True)(z)
```
All the `holomorphic=True` promise does is disable the error when the output is complex-valued. We can still write `holomorphic=True` when the function isn't holomorphic, but the answer we get out won't represent the full Jacobian. Instead, it'll be the Jacobian of the function where we just discard the imaginary part of the output:
```
def f(z):
    # Complex conjugation is the canonical NON-holomorphic function.
    return jnp.conjugate(z)
z = 3. + 4j
# The promise is not checked: this runs without error, but per the text the
# result only reflects the Jacobian with the imaginary output discarded.
grad(f, holomorphic=True)(z) # f is not actually holomorphic!
```
There are some useful upshots for how `grad` works here:
1. We can use `grad` on holomorphic $\mathbb{C} \to \mathbb{C}$ functions.
2. We can use `grad` to optimize $f : \mathbb{C} \to \mathbb{R}$ functions, like real-valued loss functions of complex parameters `x`, by taking steps in the direction of the conjugate of `grad(f)(x)`.
3. If we have an $\mathbb{R} \to \mathbb{R}$ function that just happens to use some complex-valued operations internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions) then `grad` still works and we get the same result that an implementation using only real values would have given.
In any case, JVPs and VJPs are always unambiguous. And if we wanted to compute the full Jacobian matrix of a non-holomorphic $\mathbb{C} \to \mathbb{C}$ function, we can do it with JVPs or VJPs!
You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix:
```
# A is equal to its conjugate transpose (Hermitian), so the Cholesky
# factorization below is well-defined.
A = jnp.array([[5., 2.+3j, 5j],
               [2.-3j, 7., 1.+7j],
               [-5j, 1.-7j, 12.]])
def f(X):
    # Scalar loss of the Cholesky factor of X.
    L = jnp.linalg.cholesky(X)
    return jnp.sum((L - jnp.sin(L))**2)
grad(f, holomorphic=True)(A)
```
## More advanced autodiff
In this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. We hope you now feel that taking derivatives in JAX is easy and powerful.
There's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to in an "Advanced Autodiff Cookbook", include:
- Gauss-Newton Vector Products, linearizing once
- Custom VJPs and JVPs
- Efficient derivatives at fixed-points
- Estimating the trace of a Hessian using random Hessian-vector products.
- Forward-mode autodiff using only reverse-mode autodiff.
- Taking derivatives with respect to custom data types.
- Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting).
- Optimizing VJPs with Jacobian pre-accumulation.
| github_jupyter |
```
# For Google Colaboratory
import sys, os
# Only runs when executing inside Google Colab; locates this notebook on the
# mounted Drive and cd's to its folder so relative data paths resolve.
if 'google.colab' in sys.modules:
    # mount google drive
    from google.colab import drive
    drive.mount('/content/gdrive')
    file_name = 'Task_4.ipynb'
    import subprocess
    # NOTE(review): shell=True with string concatenation is shell-injection
    # prone if file_name ever becomes user-supplied; it is a constant here.
    path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
    # Strip the notebook filename and the trailing newline to keep only the
    # directory portion of the `find` output.
    path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
    print(path_to_file)
    os.chdir(path_to_file)
!pwd
pip install osmnx==0.16.2
!pip uninstall matplotlib
!pip install matplotlib==3.1.3
import numpy as np
import pandas as pd
import osmnx as ox
from shapely.geometry import Polygon
from shapely.geometry import shape
import json
import os
import time
import matplotlib.pyplot as plt
import geopandas as gpd
import math
from collections import defaultdict
%matplotlib inline
pd.set_option('display.max_columns', None)
def getMinMax(xMax, yMax, xMin, yMin, bFirst, xLong, yLat):
    # return the min, max of x, y coord
    #
    # NOTE(review): the x-branch below is inverted relative to the y-branch —
    # it stores *larger* values into xMin and *smaller* values into xMax, so
    # on return xMin actually holds the maximum x and xMax the minimum x.
    # The bounding-box code later in this notebook appears to compensate for
    # this swap, so confirm both sites before "fixing" either one.
    if bFirst:
        # First point seen: initialise all four extrema to it.
        xMin = xMax = xLong
        yMin = yMax = yLat
        bFirst = False
    else:
        if xLong > xMin:
            xMin = xLong
        elif xLong < xMax:
            xMax = xLong
        # y-branch: conventional running min/max update.
        if yLat < yMin:
            yMin = yLat
        elif yLat > yMax:
            yMax = yLat
    return xMax, yMax, xMin, yMin, bFirst
```
# Task 4
Task 4: - Plot trajectory route onto road network
```
# import results trajectory dataset
dfMapped = pd.read_csv('./data/matchedMaps.csv')
dfMapped = pd.DataFrame(dfMapped)  # NOTE(review): redundant — read_csv already returns a DataFrame
mapList = []
count = 0
xMax = yMax = 0
xMin = yMin = 0
bFirst = True
# Each 'mgeom' cell presumably holds a "LINESTRING(lon lat, lon lat, ...)"
# style geometry string — the 11-char prefix slice matches that; confirm.
for item in dfMapped['mgeom']:
    item = item[11: -1] # remove 1st 2 & last 2 brackets
    map = []  # NOTE(review): shadows the built-in `map`
    if len(item) <= 2: # []
        print('Skip empty row:', count, ', len:', len(item), ', element:', item)
    else:
        for elem in item.split(','):
            xLong, yLat = elem.split()
            map.append((float(xLong), float(yLat))) # tuple of x, y coords
            # NOTE(review): xLong/yLat are still *strings* here, so getMinMax
            # compares them lexicographically rather than numerically —
            # likely a bug (the later pass over mapCoords uses floats).
            xMax, yMax, xMin, yMin, bFirst = getMinMax(xMax, yMax, xMin, yMin, bFirst, xLong, yLat)
    mapList.append(map)
    count += 1
print('xMin:', xMin, ', yMin:', yMin, ', xMax:', xMax, ', yMax:', yMax)
# extract 10 trajectory from the list of 1000
limit = 10
mapListLen = len(dfMapped)
mapCoords = []
# Recompute the extrema over just the first `limit` trajectories.
# NOTE(review): because getMinMax's x-logic is inverted, xMin/xMax come back
# exchanged (xMin holds the max x) — the bounds code below relies on this.
xMax = yMax = 0
xMin = yMin = 0
bFirst = True
#print('len:', trajListLen)
for i in range(limit):
    tmpMap = mapList[i]
    x = []
    y = []
    if len(tmpMap) <= 2: # []
        print('Skip empty row:', i, ', len:', len(tmpMap), ', traj:', tmpMap)
    else:
        # Split each (x, y) tuple into parallel coordinate lists for plotting.
        for tmpCord in tmpMap:
            tmpX = tmpCord[0]
            tmpY = tmpCord[1]
            #print('x:', tmpX, ', y:', tmpY)
            x.append(tmpX)
            y.append(tmpY)
            xMax, yMax, xMin, yMin, bFirst = getMinMax(xMax, yMax, xMin, yMin, bFirst, tmpX, tmpY)
    mapCoords.append((x, y))
    #print('x:', x)
    #print('y:', y)
print('xMin:', xMin, ', yMin:', yMin, ', xMax:', xMax, ', yMax:', yMax)
# sub graph using bounding box method
startTime = time.time()
delta = 0.01
# NOTE(review): getMinMax returns the x extrema swapped (xMin holds the max),
# so this expression effectively pads the true x-range outward; the rectangle
# below is the same region regardless of which corner value is larger.
bounds = (xMax - delta / 2, xMin + delta / 2, yMin - delta * 0.7, yMax + delta * 0.7)
x1, x2, y1, y2 = bounds
# Closed rectangular ring (first vertex repeated) covering the padded extent.
boundaryPolygon = Polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)])
G3 = ox.graph_from_polygon(boundaryPolygon, network_type = 'drive')
# plot all pts
startTime = time.time()
#marker = ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's']
marker = ['o', 'd', 'p', 'x', '<', 'v', '^', '+', '>', 's']  # one marker style per route
# Draw the road network first (show/close False so we can overlay on ax).
fig, ax = ox.plot_graph(G3, figsize = (20, 20), bgcolor = '#FFFFFF', \
    node_color = '#ACFF97', node_size = 1, \
    edge_color = '#C4D0FF', show = False, close = False)
# Overlay each extracted trajectory with its own marker and legend entry.
for i in range(len(mapCoords)):
    ax.plot(mapCoords[i][0], mapCoords[i][1], marker[i], ms = 8, label = 'Route_{}'.format(i + 1))
plt.legend(fontsize = 'xx-large')
plt.tight_layout()
# plt.savefig('./5mostoften.png', dpi = 320)
print(" Duration %s seconds" % (time.time() - startTime))
```
| github_jupyter |
# Introduction to Deep Learning with PyTorch
In this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.
## Neural Networks
Deep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.
<img src="assets/simple_neuron.png" width=400px>
Mathematically this looks like:
$$
\begin{align}
y &= f(w_1 x_1 + w_2 x_2 + b) \\
y &= f\left(\sum_i w_i x_i +b \right)
\end{align}
$$
With vectors this is the dot/inner product of two vectors:
$$
h = \begin{bmatrix}
x_1 \, x_2 \cdots x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_1 \\
w_2 \\
\vdots \\
w_n
\end{bmatrix}
$$
## Tensors
It turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.
<img src="assets/tensor_examples.svg" width=600px>
With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.
```
# First, import PyTorch
import torch
def activation(x):
    """Element-wise sigmoid: 1 / (1 + exp(-x)).

    Arguments
    ---------
    x: torch.Tensor
    """
    negative_exp = torch.exp(-x)
    return 1 / (1 + negative_exp)
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 5 random normal variables
features = torch.randn((1, 5))
# True weights for our data, random normal variables again
weights = torch.randn_like(features)
# and a true bias term
bias = torch.randn((1, 1))
```
Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:
`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one.
`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.
Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.
PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network.
> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
```
### Solution
# Now, make our labels from our data and true weights
# Two equivalent spellings of the same computation: the free function
# torch.sum(...) versus the Tensor.sum() method; the second overwrites y.
y = activation(torch.sum(features * weights) + bias)
y = activation((features * weights).sum() + bias)
```
You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.
Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error
```python
>> torch.mm(features, weights)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-13-15d592eb5279> in <module>()
----> 1 torch.mm(features, weights)
RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
```
As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal the number of rows in the second tensor. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.
**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.
There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).
* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.
* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.
* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.
I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.
> **Exercise**: Calculate the output of our little network using matrix multiplication.
```
## Solution
y = activation(torch.mm(features, weights.view(5,1)) + bias)
```
### Stack them up!
That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.
<img src='assets/multilayer_diagram_weights.png' width=450px>
The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated
$$
\vec{h} = [h_1 \, h_2] =
\begin{bmatrix}
x_1 \, x_2 \cdots \, x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_{11} & w_{12} \\
w_{21} &w_{22} \\
\vdots &\vdots \\
w_{n1} &w_{n2}
\end{bmatrix}
$$
The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply
$$
y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)
$$
```
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
```
> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
```
### Solution
# Hidden activations then output: h = sigma(x W1 + B1), y = sigma(h W2 + B2).
h = activation(torch.mm(features, W1) + B1)
output = activation(torch.mm(h, W2) + B2)
print(output)
```
If you did this correctly, you should see the output `tensor([[ 0.3171]])`.
The number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.
## Numpy to Torch and back
Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
```
import numpy as np
a = np.random.rand(4,3)
a
b = torch.from_numpy(a)
b
b.numpy()
```
The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
```
# Multiply PyTorch Tensor by 2, in place
b.mul_(2)
# Numpy array matches new values from Tensor
a
```
| github_jupyter |
**This notebook is an exercise in the [Pandas](https://www.kaggle.com/learn/pandas) course. You can reference the tutorial at [this link](https://www.kaggle.com/residentmario/grouping-and-sorting).**
---
# Introduction
In these exercises we'll apply groupwise analysis to our dataset.
Run the code cell below to load the data before running the exercises.
```
import pandas as pd
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
#pd.set_option("display.max_rows", 5)
from learntools.core import binder; binder.bind(globals())
from learntools.pandas.grouping_and_sorting import *
print("Setup complete.")
reviews.head()
```
# Exercises
## 1.
Who are the most common wine reviewers in the dataset? Create a `Series` whose index is the `taster_twitter_handle` category from the dataset, and whose values count how many reviews each person wrote.
```
# Your code here
reviews_written = reviews.groupby('taster_twitter_handle')['taster_twitter_handle'].size()
# Check your answer
q1.check()
q1.hint()
#q1.solution()
```
## 2.
What is the best wine I can buy for a given amount of money? Create a `Series` whose index is wine prices and whose values is the maximum number of points a wine costing that much was given in a review. Sort the values by price, ascending (so that `4.0` dollars is at the top and `3300.0` dollars is at the bottom).
```
#max_points = reviews.groupby(['price','points']).max()
best_rating_per_price = reviews.groupby('price')['points'].max().sort_index()
# Check your answer
q2.check()
q2.hint()
q2.solution()
```
## 3.
What are the minimum and maximum prices for each `variety` of wine? Create a `DataFrame` whose index is the `variety` category from the dataset and whose values are the `min` and `max` values thereof.
```
price_extremes = reviews.groupby('variety').price.agg([min,max])
# Check your answer
q3.check()
#q3.hint()
#q3.solution()
```
## 4.
What are the most expensive wine varieties? Create a variable `sorted_varieties` containing a copy of the dataframe from the previous question where varieties are sorted in descending order based on minimum price, then on maximum price (to break ties).
```
sorted_varieties = price_extremes.sort_values(by=['min','max'], ascending = False)
# Check your answer
q4.check()
q4.hint()
q4.solution()
```
## 5.
Create a `Series` whose index is reviewers and whose values is the average review score given out by that reviewer. Hint: you will need the `taster_name` and `points` columns.
```
reviewer_mean_ratings = reviews.groupby('taster_name')['points'].mean()
# Check your answer
q5.check()
#q5.hint()
#q5.solution()
```
Are there significant differences in the average scores assigned by the various reviewers? Run the cell below to use the `describe()` method to see a summary of the range of values.
```
reviewer_mean_ratings.describe()
```
## 6.
What combination of countries and varieties are most common? Create a `Series` whose index is a `MultiIndex` of `{country, variety}` pairs. For example, a pinot noir produced in the US should map to `{"US", "Pinot Noir"}`. Sort the values in the `Series` in descending order based on wine count.
```
country_variety_counts = reviews.groupby(['country', 'variety']).size().sort_values(ascending = False)
# Check your answer
q6.check()
q6.hint()
q6.solution()
```
# Keep going
Move on to the [**data types and missing data**](https://www.kaggle.com/residentmario/data-types-and-missing-values).
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/pandas/discussion) to chat with other learners.*
| github_jupyter |
1. [`Language`](#language)
1. [`Doc`](#doc)
1. [`Process`](#process)
1. [`Pipeline`](#pipeline)
1. [`MorphosyntacticFeature`](#morpho)
1. [`MorphosyntacticFeatureBundle`](#morpho-bundle)
1. [`Form`](#form)
1. [`DecisionTree`](#dt)
# `Language` <a name="language"></a>
`Language` objects are used to identify each language and keep track of its attributes (lat-long, 3-letter ISO code, etc.). Users do not normally need to create these themselves, though this type could be extended to distinguish dialects.
```
from cltk.core.data_types import Language
Language(
name="Classical Mongolian",
glottolog_id="",
latitude=0.0,
longitude=0.0,
dates=[],
family_id="",
parent_id="",
level="",
iso_639_3_code="cmg",
type="h",
)
from cltk.languages.glottolog import LANGUAGES
print(LANGUAGES["gmh"])
```
The following records all pre-modern languages as identified by the [Glottolog project](http://glottolog.org).
```
from pprint import pprint
pprint([(l.name, l.iso_639_3_code) for _, l in LANGUAGES.items()])
```
# `Word` <a name="word"></a>
`Word` is one of the most important objects. It contains all token-level information generated by a `Process`.
```
from cltk.core.data_types import Word
Word(index_char_start=0, index_char_stop=6, index_token=0, string="Gallia", pos="nom")
```
# `Doc` <a name="doc"></a>
`Doc` is the other of the two most important types. It too is built up each time a `Process` runs.
Token-level information is stored at `Doc.words`, while larger units of information (e.g., an original input string) are kept elsewhere here.
```
from cltk.core.data_types import Doc
Doc(raw="Gallia est omnis divisa in partes tres")
```
It contains a number of helper methods, too, which read (usually) from `.word` and return convenient data structures.
```
from cltk import NLP
cltk_nlp = NLP(language="lat")
# analyze() runs the default Latin pipeline and returns a populated Doc.
cltk_doc = cltk_nlp.analyze(text="Gallia est omnis divisa in partes tres")
cltk_doc.tokens # List[str]
```
# `Process` <a name="process"></a>
A `Process` is a Python `class` that wraps a particular algorithm type for a particular language (e.g., Sanskrit tokenization). It is designed to be invoked by the `Pipeline`, though a user may call it directly, too.
See notebook [Make custom Process and add to Pipeline](https://github.com/cltk/cltk/blob/dev/notebooks/Make%20custom%20Process%20and%20add%20to%20Pipeline.ipynb) for a demonstration.
```
from dataclasses import dataclass, field
from typing import List, Type
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Process


@dataclass
class StopsProcess(Process):
    """Annotate every ``Word`` in a ``Doc`` with a boolean ``stop`` attribute.

    >>> from cltk.core.data_types import Doc, Word
    >>> from cltk.stops.processes import StopsProcess
    >>> from cltk.languages.example_texts import get_example_text
    >>> lang = "lat"
    >>> words = [Word(string=token) for token in split_punct_ws(get_example_text(lang))]
    >>> stops_process = StopsProcess(language=lang)
    >>> output_doc = stops_process.run(Doc(raw=get_example_text(lang), words=words))
    >>> output_doc.words[1].string
    'est'
    >>> output_doc.words[1].stop
    True
    """

    @cachedproperty
    def algorithm(self):
        # Cached so the stopword list is loaded only once per Process instance.
        return Stops(iso_code=self.language).get_stopwords()

    def run(self, input_doc: Doc) -> Doc:
        """Return a copy of ``input_doc`` with ``Word.stop`` set on each token.

        Note this marks a word a stop if there is a match on
        either the inflected form (``Word.string``) or the
        lemma (``Word.lemma``).
        """
        output_doc = deepcopy(input_doc)  # never mutate the caller's Doc
        stops_list = self.algorithm
        for word_obj in output_doc.words:
            # Assign the membership test directly; the Word objects live in
            # output_doc.words and are mutated in place, so no index-based
            # write-back into the list is needed.
            word_obj.stop = (word_obj.string in stops_list) or (word_obj.lemma in stops_list)
        return output_doc
```
# `Pipeline` <a name="pipeline"></a>
A `Pipeline` is a list containing the algorithms, in order, as they are to be invoked by `NLP()`.
See notebook [Make custom Process and add to Pipeline](https://github.com/cltk/cltk/blob/dev/notebooks/Make%20custom%20Process%20and%20add%20to%20Pipeline.ipynb) for a demonstration.
```
from cltk.core.data_types import Pipeline
from cltk.languages.utils import get_lang


@dataclass
class AkkadianPipeline(Pipeline):
    """Default ``Pipeline`` for Akkadian.

    >>> from cltk.languages.pipelines import AkkadianPipeline
    >>> a_pipeline = AkkadianPipeline()
    >>> a_pipeline.description
    'Pipeline for the Akkadian language.'
    >>> a_pipeline.language
    Language(name='Akkadian', glottolog_id='akka1240', latitude=33.1, longitude=44.1, dates=[], family_id='afro1255', parent_id='east2678', level='language', iso_639_3_code='akk', type='a')
    >>> a_pipeline.language.name
    'Akkadian'
    >>> a_pipeline.processes[0]
    <class 'cltk.tokenizers.processes.AkkadianTokenizationProcess'>
    """

    # Human-readable description surfaced by the NLP() front end.
    description: str = "Pipeline for the Akkadian language."
    # Language metadata resolved from the ISO 639-3 code "akk".
    language: Language = get_lang("akk")
    # Ordered Process classes that NLP.analyze() will invoke.
    processes: List[Type[Process]] = field(
        default_factory=lambda: [AkkadianTokenizationProcess, StopsProcess]
    )
# `MorphosyntacticFeature` <a name="morpho"></a>
This model inherits from the Python builtin `IntEnum`. A `MorphosyntacticFeature` (at [cltk/morphology/universal_dependencies_features.py]()) has been made for each [morphosyntactic tag defined by v2 of the Universal Dependencies project](https://universaldependencies.org/u/feat/all.html).
```
from enum import auto
from cltk.morphology.universal_dependencies_features import Case, Gender, MorphosyntacticFeature, Number
# 'hercule' ('by Hercules, assuredly, indeed')
# http://www.perseus.tufts.edu/hopper/text?doc=Perseus:text:1999.04.0060:entry=hercule
gender = Gender.masculine
print("Gender:", gender)
case = Case.vocative
print("Case:", case)
number = Number.singular
print("Number", number)
# In a notebook cell this displays the enum's class.
type(case)
# Each feature value exposes the standard IntEnum .name / .value attributes.
print("`MorphosyntacticFeature.name`:", case.name)
print("`MorphosyntacticFeature.value`", case.value)
# Example declaration
class Case(MorphosyntacticFeature):
    """The case of a noun phrase.

    see https://universaldependencies.org/u/feat/Case.html
    """

    # structural cases
    nominative = auto()
    accusative = auto()
    ergative = auto()
    absolutive = auto()
    # oblique cases
    abessive = auto()
    befefactive = auto()  # NOTE(review): likely a typo for "benefactive" (UD Case=Ben) -- confirm against upstream
    causative = auto()
    comparative = auto()
    considerative = auto()
    comitative = auto()
    dative = auto()
    distributive = auto()
    equative = auto()
    genitive = auto()
    instrumental = auto()
    partitive = auto()
    vocative = auto()
    # spatiotemporal cases
    ablative = auto()
    additive = auto()
    adessive = auto()
    allative = auto()
    delative = auto()
    elative = auto()
    essive = auto()
    illative = auto()
    inessive = auto()
    lative = auto()
    locative = auto()
    perlative = auto()
    sublative = auto()
    superessive = auto()
    terminative = auto()
    temporal = auto()
    translative = auto()
# Users can learn a bit about these features, too
#help(case)
```
Note there is a distinction between POS and a word's features. POS tags are more general categories for general classes of words, like noun, verb, etc.
```
from cltk.morphology.universal_dependencies_features import POS
# so the Latin word 'hercule' would be tagged as an interjection
pos = POS.interjection
print(pos)
```
# `MorphosyntacticFeatureBundle` <a name="morpho-bundle"></a>
`MorphosyntacticFeature`s are brought together to describe a word with `MorphosyntacticFeatureBundle`.
```
from cltk.morphology.morphosyntax import MorphosyntacticFeatureBundle
# Combine the individual features created in the cells above into one bundle.
bundle = MorphosyntacticFeatureBundle(case, gender, number)
print(type(bundle))
print(bundle)
# How to access features within a bundle: index by the feature class name.
print("Gender", bundle["Gender"])
print("Case", bundle["Case"])
print("Number", bundle["Number"])
```
`POS` and `MorphosyntacticFeatureBundle` are intended to be placed within a `Word` instance.
```
Word(string="hercule", pos=pos, features=bundle)
```
# `Form` <a name="form"></a>
Since `Doc.words` is a flat list of `Word` objects, it can be difficult to model hierarchical data, such as dependency trees. To help such modeling, the `Form` and `DependencyTree` types were created.
`Form` inherits from the builtin `xml` package's `Element` and `DependencyTree` from `ElementTree`.
See [Modeling syntax with Form and DependencyTree](https://github.com/cltk/cltk/blob/dev/notebooks/CLTK%20Demonstration.ipynb) for a full example.
```
from cltk.dependency.tree import Form
```
# `DecisionTree` <a name="dt"></a>
See [Modeling syntax with Form and DependencyTree](https://github.com/cltk/cltk/blob/dev/notebooks/CLTK%20Demonstration.ipynb) for a full example.
```
from cltk.dependency.tree import DependencyTree
```
| github_jupyter |
# Bulk RNA-seq eQTL analysis
This notebook provides master control of the xQTL workflow so that it can work on multiple data collections as proposed.
Input:
A recipe file; each row is a data collection with the following columns:
Theme
name of the dataset; each must be unique. Each single-study analysis will be performed in a folder named after it; meta-analysis will be performed in a folder named {study1}_{study2}
The column name must contain the # and be the first column
genotype_list
{Path to file}
molecular_pheno
{Path to file}
region_list (list of regions to be analyzed)
{Path to file}
covariate_file
{Path to file}
factor_analysis_opt
"APEX" vs "PEER" for factor analysis
LD options:
"In-sample" LD vs {path to reference panel}
QTL_tool_option
"APEX" vs "TensorQTL" for QTL association
QTL_analysis_option
{Int for cis window} vs "trans"
Populations
The populations from which the samples were drawn
Conditions:
The nature of molecular phenotype
### Note: only data collections from the same populations and conditions will be merged to perform fixed-effect meta-analysis
Output:
...
## Generation of MWE
This is the code to generate the mwe recipe and LD_recipe on csg cluster
```
# Build the MWE (minimal working example) recipe: one row per brain region/theme,
# pointing at the genotype list, molecular phenotype, region list, and covariates.
Recipe_temp = pd.DataFrame( {"Theme" : ["AC","DLPFC","PCC"] ,
"genotype_list" : ["/home/hs3163/GIT/ADSPFG-xQTL/MWE/mwe_genotype_list",
"/home/hs3163/GIT/ADSPFG-xQTL/MWE/mwe_genotype_list",
"/home/hs3163/GIT/ADSPFG-xQTL/MWE/mwe_genotype_list"],
"molecular_pheno" : ["/home/hs3163/Project/Rosmap/data/gene_exp/AC/geneTpmResidualsAgeGenderAdj_rename.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/DLPFC/geneTpmResidualsAgeGenderAdj_rename.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/PCC/geneTpmResidualsAgeGenderAdj_rename.txt"],
"region_list" : ["~/GIT/ADSPFG-xQTL/MWE/mwe_region",
"~/GIT/ADSPFG-xQTL/MWE/mwe_region",
"~/GIT/ADSPFG-xQTL/MWE/mwe_region"] ,
"covariate_file" : ["/home/hs3163/GIT/ADSPFG-xQTL/MWE/MWE.cov","/home/hs3163/GIT/ADSPFG-xQTL/MWE/MWE.cov","/home/hs3163/GIT/ADSPFG-xQTL/MWE/MWE.cov"],
"factor_analysis_opt" : ["APEX","APEX","APEX"],
"LD_Recipe": ["~/GIT/ADSPFG-xQTL/MWE/LD_Recipe","~/GIT/ADSPFG-xQTL/MWE/LD_Recipe","~/GIT/ADSPFG-xQTL/MWE/LD_Recipe"],
"QTL_tool_option" : ["APEX","APEX","APEX"],
"QTL_analysis_option" : ["cis","cis","cis"],
"cis_windows" : [500000,500000,5000000],
"Metal" : ["T","T","F"]}).to_csv("/home/hs3163/GIT/ADSPFG-xQTL/MWE/mwe_recipe_example","\t")
### Note: only data collections from the same populations and conditions will be merged to perform fixed-effect meta-analysis
# Same recipe layout for the real ROSMAP data (BiCV factor analysis instead of APEX).
pd.DataFrame( {"Theme" : ["AC","DLPFC","PCC"] ,
"genotype_list" : [" /mnt/mfs/statgen/ROSMAP_xqtl/Rosmap_wgs_genotype_list.txt",
" /mnt/mfs/statgen/ROSMAP_xqtl/Rosmap_wgs_genotype_list.txt",
" /mnt/mfs/statgen/ROSMAP_xqtl/Rosmap_wgs_genotype_list.txt"],
"molecular_pheno" : ["/home/hs3163/Project/Rosmap/data/gene_exp/AC/geneTpmResidualsAgeGenderAdj_rename.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/DLPFC/geneTpmResidualsAgeGenderAdj_rename.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/PCC/geneTpmResidualsAgeGenderAdj_rename.txt"],
"region_list" : ["/home/hs3163/Project/Rosmap/data/gene_exp/AC/geneTpmResidualsAgeGenderAdj_rename_region_list.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/AC/geneTpmResidualsAgeGenderAdj_rename_region_list.txt",
"/home/hs3163/Project/Rosmap/data/gene_exp/AC/geneTpmResidualsAgeGenderAdj_rename_region_list.txt"] ,
"covariate_file" : ["None","None","None"],
"factor_analysis_opt" : ["BiCV","BiCV","BiCV"],
"LD_Recipe": ["~/GIT/ADSPFG-xQTL/MWE/LD_Recipe","~/GIT/ADSPFG-xQTL/MWE/LD_Recipe","~/GIT/ADSPFG-xQTL/MWE/LD_Recipe"],
"QTL_tool_option" : ["APEX","APEX","APEX"],
"QTL_analysis_option" : ["cis","cis","cis"],
"cis_windows" : [500000,500000,500000],
"Metal" : ["T","T","F"]}).to_csv("/home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example","\t", index = 0)
# NOTE(review): the bare path line below is not valid Python -- likely an editing leftover.
home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example
# LD recipe: prefix/suffix pairs used to locate precomputed per-gene LD matrices.
pd.DataFrame({"ld_file_prefix" : ["/mnt/mfs/statgen/neuro-twas/mv_wg/cache_arch/cache/geneTpmResidualsAgeGenderAdj_rename.","/mnt/mfs/statgen/neuro-twas/mv_wg/cache_arch/cache/geneTpmResidualsAgeGenderAdj_rename."],
"ld_file_surfix" : [".merged.ld.rds",".merged.ld.rds"]}).to_csv("~/GIT/ADSPFG-xQTL/MWE/LD_Recipe",sep = "\t")
# The commands below are shell examples meant to be run in a terminal
# (not as Python); each submits one sub-workflow in the background.
nohup sos run /home/hs3163/GIT/xqtl-pipeline/pipeline/complete_analysis/eQTL_analysis.ipynb QTL \
    --recipe /home/hs3163/GIT/ADSPFG-xQTL/MWE/mwe_recipe_example \
    --wd ./ \
    --exe_dir "/home/hs3163/GIT/xqtl-pipeline/pipeline/" &
# Dry runs (-s build) of individual substeps against the ROSMAP recipe.
nohup sos dryrun /home/hs3163/GIT/xqtl-pipeline/pipeline/complete_analysis/eQTL_analysis.ipynb mash_to_vcf \
    --recipe /home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example --wd ./ --exe_dir "~/GIT/xqtl-pipeline/pipeline/" -s build &
nohup sos dryrun /home/hs3163/GIT/xqtl-pipeline/pipeline/complete_analysis/eQTL_analysis.ipynb phenotype_reformatting_by_gene \
    --recipe /home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example --wd ./ --exe_dir "~/GIT/xqtl-pipeline/pipeline/" -s build &
nohup sos dryrun /home/hs3163/GIT/xqtl-pipeline/pipeline/complete_analysis/eQTL_analysis.ipynb genotype_reformatting_per_gene \
    --recipe /home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example --wd ./ --exe_dir "~/GIT/xqtl-pipeline/pipeline/" -s build &
nohup sos dryrun /home/hs3163/GIT/xqtl-pipeline/pipeline/complete_analysis/eQTL_analysis.ipynb mixture_prior \
    --recipe /home/hs3163/GIT/xqtl-pipeline/ROSMAP_recipe_example --wd ./ --exe_dir "~/GIT/xqtl-pipeline/pipeline/" -s build &
# Stand-alone PCA on the unrelated samples, then projection of related samples.
nohup sos run ~/GIT/bioworkflows/GWAS/PCA.ipynb flashpca \
    --genoFile /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/data_preprocessing/genotype/qc/PCC.mergrd.filtered.prune.unrelated.bed \
    --name PCC \
    --container_lmm /mnt/mfs/statgen/containers/xqtl_pipeline_sif/flashpcaR.sif \
    --cwd /mnt/mfs/statgen/xqtl_workflow_testing/demo/test_pca/ \
    -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml &
nohup sos run ~/GIT/bioworkflows/GWAS/PCA.ipynb project_samples:1 \
    --genoFile /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/data_preprocessing/genotype/qc/PCC.mergrd.filtered.prune.related.bed \
    --pca_model /mnt/mfs/statgen/xqtl_workflow_testing/demo/test_pca/PCC.mergrd.filtered.prune.unrelated.PCC.pca.rds \
    --name PCC \
    --container_lmm /mnt/mfs/statgen/containers/xqtl_pipeline_sif/flashpcaR.sif \
    --cwd /mnt/mfs/statgen/xqtl_workflow_testing/demo/test_pca/ \
    -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml &
```
## Example for running the workflow
This will run the workflow via several submissions and save the output to nohup.out
## Other example workflow:
These command run each of the substep to test them individually
```
[global]
## The aforementioned input recipe
parameter: recipe = path
## Overall wd; the file structure of the analysis is wd/[steps]/[sub_dir for each step]
parameter: wd = path(".")
## Directory holding the executable pipeline notebooks
parameter: exe_dir = path("~/GIT/ADSPFG-xQTL/workflow")
## Singularity containers used by the various substeps
parameter: container = '/mnt/mfs/statgen/containers/twas_latest.sif'
parameter: container_base_bioinfo = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/base-bioinfo.sif'
parameter: container_apex = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/apex.sif'
parameter: container_PEER = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/PEER.sif'
parameter: container_TensorQTL = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/TensorQTL.sif'
parameter: container_mvsusie = '/mnt/mfs/statgen/containers/twas_latest.sif'
parameter: container_METAL = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/METAL.sif'
parameter: container_flashpca = '/mnt/mfs/statgen/containers/xqtl_pipeline_sif/flashpcaR.sif'
## Cluster job template for -c
parameter: yml = "/home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml"
import pandas as pd
input_inv = pd.read_csv(recipe, sep = "\t")
## Themes flagged Metal == 'T' are merged for fixed-effect meta-analysis;
## their joint name is "{theme1}-{theme2}-...".
Metal_theme = input_inv.query("Metal == 'T'")["Theme"].to_list()
Metal_theme_str = "-".join(Metal_theme)
Non_Metal_theme = input_inv.query("Metal != 'T'")["Theme"].to_list()
Non_Metal_theme.append(Metal_theme_str)
## Prefix combining all analysis units, e.g. "{nonmetal}_{metal1-metal2}".
Theme_Prefix = "_".join(Non_Metal_theme)
parameter: LD_Recipe = path(input_inv["LD_Recipe"][0])
## Each recipe row becomes a dict; steps iterate with for_each/group_with.
input_inv = input_inv.to_dict("records")
import os
```
## Molecular Phenotype Calling
## Data Preprocessing
### Molecular Phenotype Processing
```
## Normalization step kept for reference but disabled (inputs already normalized).
#[Normalization]
#import os
#input: for_each = "input_inv"
#skip_if( os.path.exists(_input_inv["molecular_pheno"]))
#output: f'{wd:a}/data_preprocessing/normalization/{name}.mol_phe.bed.gz'
#bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
# sos run $[exe_dir]/data_preprocessing/phenotype/GWAS_QC.ipynb output \
# --counts_gct $[_input_inv["genecount_table"]] \
# --tpm_gct $[_input_inv["geneTpm_table"]] \
# --sample_participant_lookup $[_input_inv["sample_index"]] \
# --vcf_chr_list $[_input_inv["vcf_chr_list"]] \
# --container $[container_gtex] \
# --name $[_input_inv["Theme"]] \
# --wd $[wd:a]/data_preprocessing/normalization/ \
# --container $[container_base_bioinfo] \
# -J 200 -q csg -c $[yml] &
## Annotate the molecular phenotype with genomic coordinates (one run per theme).
[annotation]
## Must be run with an internet connection
import os
input: for_each = "input_inv"
output: f'{wd:a}/data_preprocessing/annotation/{_input_inv["Theme"]}.{path(_input_inv["molecular_pheno"]):bn}.annotated.bed.gz'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/data_preprocessing/phenotype/annotation.ipynb annotation \
        --molecular_pheno_whole $[_input_inv["molecular_pheno"]] \
        --wd $[wd:a]/data_preprocessing/annotation \
        --name $[_input_inv["Theme"]] --container $[container_base_bioinfo] -s build &
## Split the residualized phenotype per chromosome for association scans.
[phenotype_reformatting]
input: output_from("residual_phenotype"),group_with = "input_inv"
output: per_chrom_pheno_list = f'{wd:a}/data_preprocessing/phenotype_reformat/{_input_inv["Theme"]}.processed_phenotype.per_chrom.recipe',
        pheno_mod = f'{wd:a}/data_preprocessing/phenotype_reformat/{_input_inv["Theme"]}.for_pca.mol_phe.exp'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/phenotype/phenotype_formatting.ipynb reformat \
        --molecular_pheno_whole $[_input] \
        --region_list $[_input_inv["region_list"]] \
        --wd $[wd:a]/data_preprocessing/phenotype_reformat/ \
        --name $[_input_inv["Theme"]] --container $[container_base_bioinfo] \
        -J 200 -q csg -c $[yml]
```
#### The reformatting by gene is particularly lengthy, so to avoid excessive waiting time, it is set up as a separate substep
```
## Per-gene phenotype partition (slow; kept separate from the per-chrom split).
[phenotype_reformatting_by_gene]
input: output_from("residual_phenotype"),group_with = "input_inv"
output: per_gene_pheno_list = f'{wd:a}/data_preprocessing/phenotype_reformat/{_input_inv["Theme"]}.processed_phenotype.per_gene.recipe'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/phenotype/phenotype_formatting.ipynb partition_by_gene \
        --molecular_pheno_whole $[_input] \
        --region_list $[_input_inv["region_list"]] \
        --wd $[wd:a]/data_preprocessing/phenotype_reformat/ \
        --name $[_input_inv["Theme"]] --container $[container_base_bioinfo] \
        -J 200 -q csg -c $[yml]
```
### Genotype Processing
```
## Merge + QC the genotypes, splitting samples into related/unrelated sets.
[genotype_QC]
input: for_each = "input_inv"
output: merged_plink = f'{wd:a}/data_preprocessing/genotype/qc/{_input_inv["Theme"]}.mergrd.filtered.prune.bed',
        unrelated = f'{wd:a}/data_preprocessing/genotype/qc/{_input_inv["Theme"]}.mergrd.filtered.prune.unrelated.bed',
        related = f'{wd:a}/data_preprocessing/genotype/qc/{_input_inv["Theme"]}.mergrd.filtered.prune.related.bed'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/genotype/GWAS_QC.ipynb qc \
        --genotype_list $[_input_inv["genotype_list"]] \
        --name $[_input_inv["Theme"]] \
        --container_lmm $[container_base_bioinfo] \
        --cwd $[wd:a]/data_preprocessing/genotype/qc/ \
        -J 200 -q csg -c $[yml]
## Convert QCed plink data to per-chromosome VCF and plink files.
[genotype_reformatting]
import pandas as pd
input: output_from("genotype_QC")["merged_plink"], group_with = "input_inv"
name = _input_inv["Theme"]
output: vcf_list = f'{wd}/data_preprocessing/genotype/{name}_per_chrom_vcf/{name}.vcf_chrom_list.txt',
        per_chrom_plink_list = f'{wd}/data_preprocessing/genotype/{name}_per_chrom_plink/{name}.plink_chrom_list.txt'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/genotype/genotype_formatting.ipynb plink2vcf \
        --genoFile $[_input] \
        --name $[_input_inv["Theme"]] \
        --container $[container_base_bioinfo] \
        --region_list $[_input_inv["region_list"]] \
        --wd $[wd:a]/data_preprocessing/genotype/ \
        -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml
    sos run $[exe_dir]/data_preprocessing/genotype/genotype_formatting.ipynb plink_by_chrom \
        --genoFile $[_input] \
        --name $[_input_inv["Theme"]] \
        --region_list $[_input_inv["region_list"]] \
        --container $[container_base_bioinfo] \
        --wd $[wd:a]/data_preprocessing/genotype/ \
        -J 200 -q csg -c $[yml]
```
#### The reformatting by gene is particularly lengthy, so to avoid excessive waiting time, it is set up as a separate substep
```
## Per-gene plink partition (slow; kept separate from genotype_reformatting).
[genotype_reformatting_per_gene]
import pandas as pd
input: output_from("genotype_QC")["merged_plink"], group_with = "input_inv"
name = _input_inv["Theme"]
output: per_gene_plink = f'{wd}/data_preprocessing/genotype/{name}_per_gene_plink/{name}.plink_gene_list.txt'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/data_preprocessing/genotype/genotype_formatting.ipynb plink_by_gene \
        --genoFile $[_input] \
        --name $[_input_inv["Theme"]] \
        --region_list $[_input_inv["region_list"]] \
        --container $[container_base_bioinfo] \
        --region_list $[_input_inv["region_list"]] \
        --wd $[wd:a]/data_preprocessing/genotype/ \
        -J 2000 -q csg -c $[yml]
## NOTE(review): --region_list is passed twice above -- one occurrence is redundant.
## Compute per-gene LD matrices.
## NOTE(review): "genotype_reformatting" has no named output "per_gene_plink";
## this likely should read output_from("genotype_reformatting_per_gene") -- confirm.
[LD]
import pandas as pd
input: output_from("genotype_reformatting")["per_gene_plink"],group_with = "input_inv"
output: f'{wd}/data_preprocessing/genotype/LD/{_input_inv["Theme"]}._LD_recipe'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/genotype/LD.ipynb LD \
        --genotype_list $[_input] \
        --name $[_input_inv["Theme"]] \
        --container $[container_base_bioinfo] \
        --wd $[wd:a]/data_preprocessing/genotype/LD/ \
        -J 200 -q csg -c $[yml]
## Concatenate all per-theme LD recipes into a single list.
[LD_Recipe]
input: output_from("LD"), group_by = "all"
output: f'{wd:a}/data_preprocessing/genotype/LD/sumstat_list'
python: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    import pandas as pd
    input_list = [$[_input:r,]]
    ld_recipe = pd.read_csv(input_list[0],sep = "\t")
    # NOTE(review): DataFrame.append is deprecated in modern pandas; pd.concat is the replacement.
    for x in range(1,len(input_list)):
        ld_recipe = ld_recipe.append(pd.read_csv(input_list[x],sep = "\t"))
    ld_recipe.to_csv("$[_output]", index = 0 , sep = "\t")
## Genetic relationship matrices, one per chromosome list.
[GRM]
import pandas as pd
input: output_from("genotype_reformatting")["per_chrom_plink_list"],group_with = "input_inv"
output: f'{wd}/data_preprocessing/genotype/grm/{_input_inv["Theme"]}.grm_list.txt'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/genotype/GRM.ipynb GRM \
        --genotype_list $[_input] \
        --name $[_input_inv["Theme"]] \
        --container $[container_base_bioinfo] \
        --wd $[wd:a]/data_preprocessing/genotype/grm/ \
        -J 200 -q csg -c $[yml]
```
## Factor analysis
```
## Hidden-factor analysis (BiCV via APEX, or PEER) on the annotated phenotype.
[factor]
input: output_from("genotype_reformatting")["vcf_list"],output_from("annotation"),group_with = "input_inv"
output: f'{wd}/data_preprocessing/covariate/{_input_inv["Theme"]}.{_input_inv["factor_analysis_opt"]}.cov'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/covariate/$[_input_inv["factor_analysis_opt"]]_factor.ipynb $[_input_inv["factor_analysis_opt"]] \
        --molecular_pheno $[_input[1]] \
        --genotype_list $[_input[0]] \
        --name $[_input_inv["Theme"]] \
        --wd $[wd:a]/data_preprocessing/covariate/ \
        -J 200 -q csg -c $[yml] $[f'--covariate {_input_inv["covariate_file"]}' if os.path.exists(_input_inv["covariate_file"]) else f''] \
        --container $[container_apex if _input_inv["factor_analysis_opt"] == "BiCV" else container_PEER]
## Regress the estimated factors out of the phenotype.
[residual_phenotype]
input: output_from("factor"), output_from("annotation"),group_with = "input_inv"
output: f'{wd}/data_preprocessing/phenotype/{_input_inv["Theme"]}.mol_phe.resid.bed.gz'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/covariate/remove_covariates.ipynb Residual_Y \
        --molecular_pheno_whole $[_input[1]] \
        --factor $[_input[0]] \
        --wd $[wd]/data_preprocessing/phenotype \
        --name $[_input_inv["Theme"]] --container $[container_base_bioinfo] \
        -J 200 -q csg -c $[yml]
## PCA on unrelated samples, then project the related samples onto the model.
[pca]
import pandas as pd
input: output_from("genotype_QC")["related"],output_from("genotype_QC")["unrelated"],group_with = "input_inv"
output: f'{wd}/data_preprocessing/covariate/pca/{_input[0]:bn}.{_input_inv["Theme"]}.pca.projected.rds'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/covariate/PCA.ipynb flashpca \
        --genoFile $[_input[1]] \
        --name $[_input_inv["Theme"]] \
        --container_lmm $[container_flashpca] \
        --cwd $[wd:a]/data_preprocessing/covariate/pca/ \
        -J 200 -q csg -c $[yml]
    sos run $[exe_dir]/data_preprocessing/covariate/PCA.ipynb project_samples:1 \
        --genoFile $[_input[0]] \
        --pca_model $[wd:a]/data_preprocessing/covariate/pca/$[_input[1]:bn].$[_input_inv["Theme"]].pca.rds \
        --name $[_input_inv["Theme"]] \
        --container_lmm $[container_flashpca] \
        --cwd $[wd:a]/data_preprocessing/covariate/pca/ \
        -J 200 -q csg -c $[yml]
## Merge genotype PCs with the hidden factors into one covariate file.
[pca_factor_merge]
import pandas as pd
input: output_from("pca"),output_from("factor"),group_with = "input_inv"
output: f'{wd}/data_preprocessing/covariate/{_input[1]:bn}.pca.cov'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/data_preprocessing/covariate/merge_covariate.ipynb pca_factor_merge \
        --factor_and_covariate $[_input[1]] \
        --PC $[_input[0]] \
        --container $[container_base_bioinfo] \
        --wd $[wd:a]/data_preprocessing/covariate/ \
        -J 200 -q csg -c $[yml]
```
## QTL associations
```
## Association scan per theme with APEX or TensorQTL (cis or trans).
[QTL_1]
input: output_from("pca_factor_merge"),output_from("GRM"),output_from("phenotype_reformatting")["per_chrom_pheno_list"],output_from("genotype_reformatting")["vcf_list"], output_from("genotype_reformatting")["per_chrom_plink_list"] ,group_with = "input_inv"
output: f'{wd:a}/association_scan/{_input_inv["QTL_tool_option"]}/{_input_inv["QTL_analysis_option"]}/{_input_inv["Theme"]}.{_input_inv["QTL_tool_option"]}_QTL_recipe.tsv'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/association_scan/$[_input_inv["QTL_tool_option"]]/$[_input_inv["QTL_tool_option"]].ipynb $[_input_inv["QTL_tool_option"]]_$[_input_inv["QTL_analysis_option"]] \
        --molecular_pheno_list $[_input[2]] \
        --covariate $[_input[0]]\
        --genotype_file_list $[_input[3]] \
        --container $[container_apex if _input_inv["QTL_tool_option"] == "APEX" else container_TensorQTL] \
        --window $[_input_inv["cis_windows"]] \
        --name $[_input_inv["Theme"]] \
        --wd $[wd:a]/association_scan/$[_input_inv["QTL_tool_option"]]/$[_input_inv["QTL_analysis_option"]]/ \
        -J 200 -q csg -c $[yml] $[f'--grm_list {_input[1]}' if _input_inv["QTL_tool_option"] == "APEX" else f'']
```
#### Example:
sos run /home/hs3163/GIT/ADSPFG-xQTL/workflow/QTL_association/QTL_association.ipynb APEX_cis_Recipe \
--recipe data_preprocessing/PCC.data_proc_output_recipe.tsv \
--container /mnt/mfs/statgen/containers/apex.sif \
--window 500000 \
--name PCC \
--wd /mnt/mfs/statgen/xqtl_workflow_testing/testing_no_cov/QTL_association/ \
-J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml
```
## Merge the per-theme sumstat recipes into one table keyed by chromosome,
## with one column per theme.
[QTL_2]
input: group_by = "all"
output: f'{_input[0]:d}/sumstat_list'
python: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    import pandas as pd
    input_list = [$[_input:r,]]
    input_inv = $[input_inv]
    sumstat_list = pd.read_csv(input_list[0],sep = "\t")
    sumstat_list = sumstat_list.sort_values('#chr')
    for x in range(1,len(input_list)):
        sumstat_list = sumstat_list.merge(pd.read_csv(input_list[x],sep = "\t"), on = "#chr")
    sumstat_list.columns = ["#chr"] + pd.DataFrame(input_inv)["Theme"].values.tolist()
    sumstat_list.to_csv("$[_output]", index = 0 , sep = "\t")
```
## Meta Analysis
Input:
1. A recipe generated from the combination of previous steps
Output:
1. Recipe for Prior, Vhat, rds input, resid corr
3. vcf
```
## Fixed-effect meta-analysis (METAL) across the themes flagged Metal == 'T'.
[METAL]
input: output_from("QTL_2")
METAL_sumstat_list = f'{_input}.METAL.tsv'
## NOTE(review): to_csv returns None, so sumstat_list is not reused -- the line
## is executed only for its side effect of writing the METAL input file.
sumstat_list = pd.read_csv(_input,sep = "\t")[["#chr"] + Metal_theme].to_csv(METAL_sumstat_list,sep = "\t", index = 0)
output: f'{wd}/multivariate/METAL/{Metal_theme_str}.METAL_list.txt'
##task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/multivariate/METAL/METAL.ipynb METAL \
        --sumstat_list_path $[METAL_sumstat_list] \
        --wd $[wd:a]/multivariate/METAL/ --container $[container_METAL] \
        -J 200 -q csg -c $[yml]
```
## MASH
```
## QC and merge the per-theme summary statistics in preparation for MASH.
[sumstat_merger_1]
parameter: sumstat_list = f'{wd}/multivariate/METAL/{Metal_theme_str}.METAL_list.txt'
input: output_from("QTL_2")
output: yml_list = f'{wd}/multivariate/MASH/Prep/yml_list.txt',
        qced_sumstat_list = f'{wd}/multivariate/MASH/Prep/qc_sumstat_list.txt'
##task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/misc/yml_generator.ipynb yml_list \
        --sumstat_list_path $[_input] \
        --wd $[wd:a]/multivariate/MASH/Prep/ --container $[container_base_bioinfo]
    sos run $[exe_dir]/misc/summary_stats_merger.ipynb \
        --yml_list $[_output[0]] \
        --cwd $[wd:a]/multivariate/MASH/Prep/ --container $[container_base_bioinfo] --keep_ambiguous True \
        -J 200 -q csg -c $[yml]
## Convert the QCed sumstats into per-region RDS analysis units.
[sumstat_merger_2]
input: named_output("qced_sumstat_list")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
output: f'{wd}/multivariate/MASH/Prep/merge/RDS/{name}.analysis_unit'
##task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/multivariate/MASH/sumstat_processing.ipynb processing \
        --sumstat_list_path $[_input] \
        --region_list $[input_inv[0]["region_list"]] \
        --wd $[wd:a]/multivariate/MASH/Prep/ --container $[container_base_bioinfo] \
        -J 2000 -q csg -c $[yml]
## Extract the strong/random effects needed to fit the MASH model.
[extract_effect]
input: output_from("sumstat_merger")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
output: f'{wd}/multivariate/MASH/Prep/{name}.rds'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/multivariate/MASH/Signal_Extraction.ipynb extract_effects \
        --cwd $[wd:a]/multivariate/MASH/Prep/ \
        --container $[container_base_bioinfo] \
        --name $[name] \
        --analysis_units $[_input] \
        -J 2000 -q csg -c $[yml]
## Fit the MASH model (EZ mode, simple V estimate of residual correlation).
[mash_model]
input: output_from("extract_effect")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
output: MASH_model = f"{wd}/multivariate/MASH/{name}.EZ.V_simple.mash_model.rds",
        resid_corr = f"{wd}/multivariate/MASH/{name}.EZ.V_simple.rds"
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/multivariate/MASH/mashr.ipynb mash \
        --cwd $[wd:a]/multivariate/MASH/ \
        --container $[container_mvsusie] \
        --output_prefix $[name] \
        --data $[_input] \
        -J 200 -q csg -c $[yml]
## Compute posterior effect estimates for every analysis unit.
[mash_posterior]
input: output_from("mash_model")["MASH_model"], output_from("sumstat_merger")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
parameter: analysis_unit = _input[1]
output: f'{wd}/multivariate/MASH/mash_output_list'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/multivariate/MASH/mashr.ipynb posterior \
        --cwd $[wd:a]/multivariate/MASH/ \
        --container $[container_mvsusie] \
        --output_prefix $[name] \
        --analysis_units $[analysis_unit] \
        -J 2000 -q csg -c $[yml]
## Export the MASH posterior RDS files to VCF.
[mash_to_vcf]
input: output_from("mash_posterior")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
output: f'{wd}/multivariate/MASH/mash_vcf/vcf_output_list.txt'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/misc/rds_to_vcf.ipynb rds_to_vcf \
        --wd $[wd:a]/multivariate/MASH/ \
        --name $[name] \
        --analysis_units $[_input] \
        -J 2000 -q csg -c $[yml]
```
## Fine mapping
```
## Fit the ED-Bovy mixture prior from the MASH effects (input to mvSuSiE).
[mixture_prior]
input: output_from("mash_model")["MASH_model"], output_from("extract_effect")
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
output: f'{wd}/fine_mapping/mixture_prior/{name}.ed_bovy.V_simple.rds'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/multivariate/MASH/mixture_prior.ipynb ed_bovy \
        --cwd $[wd:a]/fine_mapping/mixture_prior/ \
        --container $[container_mvsusie] \
        --name $[name] \
        --data $[_input[1]] \
        --mixture_components_dir $[_input[0]:d] \
        -J 200 -q csg -c $[yml]
## Manual example invocation kept for reference (run in a shell, not by SoS):
nohup sos run /home/hs3163/GIT/xqtl-pipeline/pipeline/multivariate/MASH/mixture_prior.ipynb ed_bovy --model_data fine_mapping/mixture_prior/AC_DLPFC_PCC.ed_bovy.V_simple.rds --cwd ./ --container /mnt/mfs/statgen/containers/twas_latest.sif --name AC_DLPFC_PCC --data multivariate/MASH/Prep/AC_DLPFC_PCC.rds --mixture_components_dir multivariate/MASH -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml &
## Multivariate SuSiE-RSS fine mapping from summary statistics.
[mvsusie_rss]
input: output_from("mixture_prior"), output_from("sumstat_merger"), output_from("mash_model")["resid_corr"]
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
parameter: analysis_unit = _input[1]
output: f'{wd:a}/fine_mapping/mvsusie_rss/{name}.mvsusie_rss.output_list.txt'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/fine_mapping/SuSiE/SuSiE_RSS.ipynb MvSuSiE_summary_stats_analysis \
        --merged_analysis_unit $[analysis_unit] \
        --resid_cor $[_input[2]] \
        --prior $[_input[0]] \
        --LD_Recipe /home/hs3163/GIT/ADSPFG-xQTL/MWE/LD_Recipe \
        --container $[container_mvsusie] \
        --wd $[wd:a]/fine_mapping/mvsusie_rss/ \
        --Theme_prefix $[name] -J 200 -q csg -c $[yml]
## NOTE(review): --LD_Recipe is hard-coded above although a global LD_Recipe
## parameter exists -- presumably it should use $[LD_Recipe]; confirm.
## Manual example invocation kept for reference:
nohup sos run /home/hs3163/GIT/xqtl-pipeline/pipeline/fine_mapping/SuSiE/SuSiE_RSS.ipynb MvSuSiE_summary_stats_analysis \
    --merged_analysis_unit /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/showcase_gene \
    --resid_cor multivariate/MASH/AC_DLPFC_PCC.EZ.V_simple.rds \
    --prior /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/fine_mapping/mixture_prior/AC_DLPFC_PCC.ed_bovy.V_simple.rds \
    --LD_Recipe /home/hs3163/GIT/ADSPFG-xQTL/MWE/LD_Recipe \
    --container /mnt/mfs/statgen/containers/twas_latest.sif \
    --wd /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/fine_mapping/mvsusie_rss/ \
    --Theme_prefix AC_DLPFC_PCC -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml -s build &
## Univariate SuSiE-RSS fine mapping from summary statistics.
[unisusie_rss]
input: output_from("mixture_prior"), output_from("sumstat_merger"), output_from("mash_model")["resid_corr"]
name = "_".join(pd.DataFrame(input_inv)["Theme"].values.tolist())
parameter: analysis_unit = _input[1]
output: f'{wd:a}/fine_mapping/unisusie_rss/{name}.unisusie_rss.output_list.txt'
bash: expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
    sos run $[exe_dir]/fine_mapping/SuSiE/SuSiE_RSS.ipynb UniSuSiE_summary_stats_analysis \
        --merged_analysis_unit $[analysis_unit] \
        --LD_Recipe /home/hs3163/GIT/ADSPFG-xQTL/MWE/LD_Recipe \
        --container $[container_mvsusie] \
        --wd $[wd:a]/fine_mapping/unisusie_rss/ \
        --Theme_prefix $[name]
## Manual example invocation kept for reference:
sos run /home/hs3163/GIT/xqtl-pipeline/pipeline/multivariate/MASH/mashr.ipynb mash \
    --cwd ./ \
    --container /mnt/mfs/statgen/containers/xqtl_pipeline_sif/mvsusie.sif \
    --output_prefix AC_DLPFC_PCC \
    --data /mnt/mfs/statgen/xqtl_workflow_testing/ROSMAP/multivariate/MASH/Prep/AC_DLPFC_PCC.rds \
    -J 200 -q csg -c /home/hs3163/GIT/ADSPFG-xQTL/code/csg.yml &
## Univariate SuSiE on individual-level per-gene genotype/phenotype data.
## NOTE(review): this step uses {name} in its output path but never defines it
## (other steps set name = _input_inv["Theme"]) -- likely a bug; confirm.
[unisusie]
input: output_from("phenotype_reformatting_per_gene"),output_from("genotype_reformatting_per_gene"), group_with = "input_inv"
output: f'{wd:a}/fine_mapping/unisusie/{name}/{name}.unisusie.output_list.txt'
#task: trunk_workers = 1, trunk_size = 1, walltime = '24h', mem = '40G', tags = f'{step_name}_{_output[0]:bn}'
bash: expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    sos run $[exe_dir]/fine_mapping/SuSiE/SuSiE.ipynb UniSuSiE_summary_stats_analysis uni_susie \
        --phenotype_list $[_input[0]] \
        --genotype_list $[_input[1]] \
        --container $[container_mvsusie] \
        --region_list $[_input_inv["region_list"]] \
        --name $[_input_inv["Theme"]] \
        --wd $[wd:a]/fine_mapping/unisusie/$[name]/ \
        -J 200 -q csg -c $[yml] &
```
| github_jupyter |
<a href="https://colab.research.google.com/github/airctic/icevision-gradio/blob/master/IceApp_pets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# IceVision Deployment App Example: PETS Dataset
This example uses Faster RCNN trained weights using the [PETS dataset](https://airctic.github.io/icedata/pets/)
[IceVision](https://github.com/airctic/IceVision) features:
✔ Data curation/cleaning with auto-fix
✔ Exploratory data analysis dashboard
✔ Pluggable transforms for better model generalization
✔ Access to hundreds of neural net models (Torchvision, MMDetection, EfficientDet, Timm)
✔ Access to multiple training loop libraries (Pytorch-Lightning, Fastai)
✔ Multi-task training to efficiently combine object
detection, segmentation, and classification models
## Installing packages
```
# Download and run IceVision's inference-only installer (colab flavor),
# then install Gradio for the UI.
!wget https://raw.githubusercontent.com/airctic/icevision/master/install_icevision_inference.sh
!bash install_icevision_inference.sh colab
!echo "- Installing gradio"
!pip install gradio -U -q

# Restart the kernel so the freshly installed packages become importable.
import IPython
IPython.Application.instance().kernel.do_shutdown(True)
```
## Imports
```
from icevision.all import *
import icedata
import PIL, requests
import torch
from torchvision import transforms
import gradio as gr
```
## Loading trained model
```
# Breed names from the PETS dataset. Kept sorted so that the class id assigned
# to each breed is deterministic (ASCII order: uppercase names sort first).
_CLASSES = sorted(
    (
        "Abyssinian",
        "Bengal",
        "Birman",
        "Bombay",
        "British_Shorthair",
        "Egyptian_Mau",
        "Maine_Coon",
        "Persian",
        "Ragdoll",
        "Russian_Blue",
        "Siamese",
        "Sphynx",
        "american_bulldog",
        "american_pit_bull_terrier",
        "basset_hound",
        "beagle",
        "boxer",
        "chihuahua",
        "english_cocker_spaniel",
        "english_setter",
        "german_shorthaired",
        "great_pyrenees",
        "havanese",
        "japanese_chin",
        "keeshond",
        "leonberger",
        "miniature_pinscher",
        "newfoundland",
        "pomeranian",
        "pug",
        "saint_bernard",
        "samoyed",
        "scottish_terrier",
        "shiba_inu",
        "staffordshire_bull_terrier",
        "wheaten_terrier",
        "yorkshire_terrier",
    )
)
# Map the sorted class names to contiguous integer ids for the model head.
class_map = ClassMap(_CLASSES)
class_map

# Loading pretrained Faster RCNN weights from IceZoo (IceVision Hub).
model = icedata.pets.trained_models.faster_rcnn_resnet50_fpn()

# Inference-time transforms: resize/pad to a 384px square, then normalize.
image_size = 384
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(image_size), tfms.A.Normalize()])
```
## Defining the `show_preds` method: called by `gr.Interface(fn=show_preds, ...)`
```
# Model type used by end2end_detect() in show_preds below.
model_type = models.torchvision.faster_rcnn


def show_preds(input_image, display_label, display_bbox, detection_threshold):
    """Run detection on *input_image* and return the annotated PIL image.

    A threshold of 0 (the slider's minimum) is treated as the 0.5 default.
    """
    threshold = 0.5 if detection_threshold == 0 else detection_threshold
    pil_img = PIL.Image.fromarray(input_image, 'RGB')
    prediction = model_type.end2end_detect(
        pil_img,
        valid_tfms,
        model,
        class_map=class_map,
        detection_threshold=threshold,
        display_label=display_label,
        display_bbox=display_bbox,
        return_img=True,
        font_size=40,
        label_color="#FF59D6",
    )
    return prediction['img']
```
## Gradio User Interface
```
# Interactive controls mirroring show_preds' keyword parameters.
display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)
# Slider value 0 is remapped to 0.5 inside show_preds.
detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")
# NOTE(review): gr.inputs / gr.outputs are legacy Gradio namespaces — verify
# against the installed Gradio version.
outputs = gr.outputs.Image(type="pil")
gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - PETS')
gr_interface.launch(inline=False, share=True, debug=True)
```
| github_jupyter |
My beloved [SF Tsunami Master Team](http://sftsunami.org/) had planned a great picnic for this weekend. For the second year in a row the plan had to be canceled due to inclement weather.
I admit I sneered at the idea of having the picnic the same month as last year, considering that it got canceled once. However, forming opinions based on a sample size of two with a sprinkle of gut feeling is not the way a Scientist does things, so I thought it would be interesting and constructive to pull some data to validate or disprove my prejudice and to provide a valid alternative.
My hypothesis is quite simple: in April chances of rain are way higher than in May, while temperatures are pretty constant, so the latter would be a better option to plan outdoor activities.
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
import matplotlib
from matplotlib import pyplot as plt
plt.style.use('ggplot')
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
```
With a quick Google search I ran into [this](http://ggweather.com) site, which reports monthly and daily information about temperature and precipitation.
The format is quite easy to scrape. The data I'm interested in are the monthly average temperatures and the number of rainy days per month.
```
class PicNicPlanner(object):
    """Scrape ggweather.com for SF rainy-day counts and monthly mean temperatures."""

    RAIN_URL = 'http://ggweather.com/sf/daily.html'
    TEMP_URL = 'http://ggweather.com/sf/monthly%20mean%20temps.html'

    def __init__(self):
        # Lazily-populated caches, filled on the first call to the getters.
        self.rain_table = None
        self.temperature_table = None

    def _read_soup(self, url, split='\t'):
        """Fetch *url* and return the non-empty text chunks of its second <table>."""
        flob = urllib.request.urlopen(url)
        s = flob.read()
        flob.close()
        soup = BeautifulSoup(s, "lxml")
        # findAll('table')[1]: the data lives in the page's second table.
        return [s for s in soup.findAll('table')[1].get_text().split(split) if len(s)>0]

    def _clean_rain(self, row):
        # The chunk's first line is a header label; the rest are monthly counts.
        return pd.Series(row.strip().split('\n')[1:]).astype(float)

    def get_rains(self):
        """Return a DataFrame of rainy-day counts: one row per year, one column per month."""
        if self.rain_table is None:
            raw_rows = self._read_soup(self.RAIN_URL, '\xa0')
            # Keep only the "Days" rows, excluding the "Rain Days" totals.
            cleaned_rows = pd.concat(
                [self._clean_rain(row) for row in raw_rows if 'Days' in row and 'Rain' not in row],
                axis=1)
            # The site lists each season starting in July.
            cleaned_rows.index = ['Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',
                                  'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
            self.rain_table = cleaned_rows.transpose()
            # NOTE(review): hard-coded year range (2008-2017) — breaks if the
            # site adds or removes seasons; confirm against the live page.
            self.rain_table.index = list(range(2008,2018))
        return self.rain_table

    def _clean_temperatures(self, row):
        # Return a Series for data chunks; boilerplate chunks fall through to
        # None, which pd.concat drops.
        if len(row) > 1 and not (
                'Copyright' in row or
                'Reproduction' in row or
                'San Francisco' in row):
            return pd.Series(row.strip().split('\n'))

    def get_temperatures(self):
        """Return a DataFrame of monthly mean temperatures: one row per year."""
        if self.temperature_table is None:
            raw_rows = self._read_soup(self.TEMP_URL)
            cleaned_rows = pd.concat([self._clean_temperatures(row)
                                      for row in raw_rows[2:]],axis=1)
            # The first scraped row holds the year labels; promote it to columns.
            cleaned_rows.columns = cleaned_rows.iloc[0]
            cleaned_rows = cleaned_rows.drop(0).dropna(axis=0)
            cleaned_rows.index = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Annual']
            self.temperature_table = cleaned_rows.transpose()
            self.temperature_table = self.temperature_table.astype(float)
        return self.temperature_table
def num_days(month):
    """Return the number of days in *month* (three-letter name).

    February is always treated as 28 days (leap years are ignored).
    """
    if month == 'Feb':
        return 28
    thirty_day_months = ('Nov', 'Apr', 'Jun', 'Sep')
    return 30 if month in thirty_day_months else 31
# Scrape both tables once; results are cached on the planner instance.
planner = PicNicPlanner()
temp = planner.get_temperatures()
rains = planner.get_rains()
```
The data seems to confirm my hypothesis.
So, it turns out that the data confirms that having a picnic in SF in April is probably not the best idea if you want to frolic in the Sun, while your chances of having a successful event in May are almost **three times higher!** In the figure below we can see how April has a 23% chance of rain! Basically one day out of 4. As late as November we can have better conditions than in April, and yet I doubt people would consider it reasonable to organize a picnic in November.
```
# Two stacked panels: chance of rain (%) on top, mean temperature below.
fig, axes = plt.subplots(nrows=2, ncols=1)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
axes[0].set_title('Chances of Rain by Month')  # fixed typo: was "Changes"
axes[1].set_title('Monthly Average Temperatures')
# 10% reference line: roughly one rainy day every ten days.
axes[0].axhline(10, color='r', linestyle='--')
# Average rainy days per month divided by days in that month -> percentage.
(100*rains.mean() / rains.columns.map(num_days))[months].plot(kind='bar', ax=axes[0], sharex=True, color='#000099')
axes[0].set_ylabel('Chance of Rain (%)')  # was mislabeled 'Days'
temp.mean()[months].plot(kind='bar', ax=axes[1], color='#7f0000')
axes[1].set_ylabel('Temperature (°F)')  # was mislabeled 'Hours'
axes[1].set_xlabel('Month')
axes[1].set_ylim([50,65]) # never too hot, never too cold
pd.DataFrame({'Daily Chances of Rain': 100*rains.mean() / rains.columns.map(num_days)}).loc[months]
```
The take-home message should be:
- April is too soon to plan a picnic
- May is quite dry
- SF's weather is good enough to allow you to hang out outside as late as November!
[source](https://github.com/mrpozzi/mrpozzi.github.io/blob/master/notebooks/PicnicInSF.ipynb)
| github_jupyter |
# Probability
Think Bayes, Second Edition
Copyright 2020 Allen B. Downey
License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
The foundation of Bayesian statistics is Bayes's Theorem, and the foundation of Bayes's Theorem is conditional probability.
In this chapter, we'll start with conditional probability, derive Bayes's Theorem, and demonstrate it using a real dataset. In the next chapter, we'll use Bayes's Theorem to solve problems related to conditional probability. In the chapters that follow, we'll make the transition from Bayes's Theorem to Bayesian statistics, and I'll explain the difference.
## Linda the Banker
To introduce conditional probability, I'll use an example from a [famous experiment by Tversky and Kahneman](https://en.wikipedia.org/wiki/Conjunction_fallacy), who posed the following question:
> Linda is 31 years old, single, outspoken, and very bright. She majored in philosophy. As a student, she was deeply concerned with issues of discrimination and social justice, and also participated in anti-nuclear demonstrations. Which is more probable?
> 1. Linda is a bank teller.
> 2. Linda is a bank teller and is active in the feminist movement.
Many people choose the second answer, presumably because it seems more consistent with the description. It seems uncharacteristic if Linda is *just* a bank teller; it seems more consistent if she is also a feminist.
But the second answer cannot be "more probable", as the question asks. Suppose we find 1000 people who fit Linda's description and 10 of them work as bank tellers. How many of them are also feminists? At most, all 10 of them are; in that case, the two options are *equally* probable. If fewer than 10 are, the second option is *less* probable. But there is no way the second option can be *more* probable.
If you were inclined to choose the second option, you are in good company. The biologist [Stephen J. Gould wrote](https://sci-hub.tw/https://doi.org/10.1080/09332480.1989.10554932) :
> I am particularly fond of this example because I know that the [second] statement is least probable, yet a little [homunculus](https://en.wikipedia.org/wiki/Homunculus_argument) in my head continues to jump up and down, shouting at me, "but she can't just be a bank teller; read the description."
If the little person in your head is still unhappy, maybe this chapter will help.
## Probability
At this point I should provide a definition of "probability", but that [turns out to be surprisingly difficult](https://en.wikipedia.org/wiki/Probability_interpretations). To avoid getting stuck before we start, we will use a simple definition for now and refine it later: A **probability** is a fraction of a finite set.
For example, if we survey 1000 people, and 20 of them are bank tellers, the fraction that work as bank tellers is 0.02 or 2\%. If we choose a person from this population at random, the probability that they are a bank teller is 2\%.
By "at random" I mean that every person in the dataset has the same chance of being chosen.
With this definition and an appropriate dataset, we can compute probabilities by counting.
To demonstrate, I'll use data from the [General Social Survey](http://gss.norc.org/) (GSS).
The following cell downloads the data.
```
# Download the GSS extract once; skip the download if it is already cached.
import os

if not os.path.exists('gss_bayes.csv'):
    !wget https://github.com/AllenDowney/BiteSizeBayes/raw/master/gss_bayes.csv
```
I'll use Pandas to read the data and store it in a `DataFrame`.
```
import pandas as pd

# One row per respondent; the survey's caseid column becomes the index.
gss = pd.read_csv('gss_bayes.csv', index_col=0)
gss.head()
```
The `DataFrame` has one row for each person surveyed and one column for each variable I selected.
The columns are
* `caseid`: Respondent id (which is the index of the table).
* `year`: Year when the respondent was surveyed.
* `age`: Respondent's age when surveyed.
* `sex`: Male or female.
* `polviews`: Political views on a range from liberal to conservative.
* `partyid`: Political party affiliation, Democrat, Independent, or Republican.
* `indus10`: [Code](https://www.census.gov/cgi-bin/sssd/naics/naicsrch?chart=2007) for the industry the respondent works in.
Let's look at these variables in more detail, starting with `indus10`.
## Fraction of Bankers
The code for "Banking and related activities" is 6870, so we can select bankers like this:
```
# Industry code 6870 = "Banking and related activities".
banker = (gss['indus10'] == 6870)
banker.head()
```
The result is a Pandas `Series` that contains the Boolean values `True` and `False`.
If we use the `sum` function on this `Series`, it treats `True` as 1 and `False` as 0, so the total is the number of bankers.
```
banker.sum()
```
In this dataset, there are 728 bankers.
To compute the *fraction* of bankers, we can use the `mean` function, which computes the fraction of `True` values in the `Series`:
```
banker.mean()
```
About 1.5% of the respondents work in banking, so if we choose a random person from the dataset, the probability they are a banker is about 1.5%.
## The Probability Function
I'll put the code from the previous section in a function that takes a Boolean series and returns a probability:
```
def prob(A):
    """Probability that proposition A holds.

    A is a Boolean Series; since True counts as 1 and False as 0, the
    mean is exactly the fraction of True values.
    """
    fraction_true = A.mean()
    return fraction_true
```
So we can compute the fraction of bankers like this:
```
prob(banker)
```
Now let's look at another variable in this dataset.
The values of the column `sex` are encoded like this:
```
1 Male
2 Female
```
So we can make a Boolean series that is `True` for female respondents and `False` otherwise.
```
female = (gss['sex'] == 2)  # GSS sex encoding: 1 = Male, 2 = Female
```
And use it to compute the fraction of respondents who are women.
```
prob(female)
```
The fraction of women in this dataset is higher than in the adult U.S. population because [the GSS does not include people living in institutions](https://gss.norc.org/faq) like prisons and military housing, and those populations are more likely to be male.
## Political Views and Parties
The other variables we'll consider are `polviews`, which describes the political views of the respondents, and `partyid`, which describes their affiliation with a political party.
The values of `polviews` are on a seven-point scale:
```
1 Extremely liberal
2 Liberal
3 Slightly liberal
4 Moderate
5 Slightly conservative
6 Conservative
7 Extremely conservative
```
I'll define `liberal` to be `True` for anyone whose response is "Extremely liberal", "Liberal", or "Slightly liberal".
```
liberal = (gss['polviews'] <= 3)  # 1-3: Extremely liberal, Liberal, Slightly liberal
```
Here's the fraction of respondents who are liberal by this definition.
```
prob(liberal)
```
If we choose a random person in this dataset, the probability they are liberal is about 27%.
The values of `partyid` are encoded like this:
```
0 Strong democrat
1 Not strong democrat
2 Independent, near democrat
3 Independent
4 Independent, near republican
5 Not strong republican
6 Strong republican
7 Other party
```
I'll define `democrat` to include respondents who chose "Strong democrat" or "Not strong democrat":
```
democrat = (gss['partyid'] <= 1)  # 0-1: Strong democrat, Not strong democrat
```
And here's the fraction of respondents who are Democrats, by this definition.
```
prob(democrat)
```
## Conjunction
Now that we have a definition of probability and a function that computes it, let's move on to conjunction.
"Conjunction" is another name for the logical `and` operation. If you have two [propositions](https://en.wikipedia.org/wiki/Proposition), `A` and `B`, the conjunction `A and B` is `True` if both `A` and `B` are `True`, and `False` otherwise.
If we have two Boolean series, we can use the `&` operator to compute their conjunction.
For example, we have already computed the probability that a respondent is a banker.
```
prob(banker)
```
And the probability that they are a Democrat:
```
prob(democrat)
```
Now we can compute the probability that a respondent is a banker *and* a Democrat:
```
prob(banker & democrat)
```
As we should expect, `prob(banker & democrat)` is less than `prob(banker)`, because not all bankers are Democrats.
We expect conjunction to be commutative; that is, `A & B` should be the same as `B & A`. To check, we can also compute `prob(democrat & banker)`:
```
prob(democrat & banker)
```
As expected, they are the same.
## Conditional Probability
Conditional probability is a probability that depends on a condition, but that might not be the most helpful definition. Here are some examples:
* What is the probability that a respondent is a Democrat, given that they are liberal?
* What is the probability that a respondent is female, given that they are a banker?
* What is the probability that a respondent is liberal, given that they are female?
Let's start with the first one, which we can interpret like this: "Of all the respondents who are liberal, what fraction are Democrats?"
We can compute this probability in two steps:
1. Select all respondents who are liberal.
2. Compute the fraction of the selected respondents who are Democrats.
To select liberal respondents, we can use the bracket operator, `[]`, like this:
```
selected = democrat[liberal]
```
`selected` contains the values of `democrat` for liberal respondents, so `prob(selected)` is the fraction of liberals who are Democrats:
```
prob(selected)
```
A little more than half of liberals are Democrats. If that result is lower than you expected, keep in mind:
1. We used a somewhat strict definition of "Democrat", excluding Independents who "lean" democratic.
2. The dataset includes respondents as far back as 1974; in the early part of this interval, there was less alignment between political views and party affiliation, compared to the present.
Let's try the second example, "What is the probability that a respondent is female, given that they are a banker?"
We can interpret that to mean, "Of all respondents who are bankers, what fraction are female?"
Again, we'll use the bracket operator to select only the bankers and `prob` to compute the fraction that are female.
```
selected = female[banker]
prob(selected)
```
About 77% of the bankers in this dataset are female.
Let's wrap this computation in a function.
I'll define `conditional` to take two Boolean series, `proposition` and `given`, and compute the conditional probability of `proposition` conditioned on `given`:
```
def conditional(proposition, given):
    """Probability of *proposition* among the rows where *given* is True.

    Equivalent to prob(proposition[given]): restrict to the condition,
    then take the fraction of True values.
    """
    restricted = proposition[given]
    return restricted.mean()
```
We can use `conditional` to compute the probability that a respondent is liberal given that they are female.
```
conditional(liberal, given=female)
```
About 28% of female respondents are liberal.
I included the keyword, `given`, along with the parameter, `female`, to make this expression more readable.
## Conditional Probability Is Not Commutative
We have seen that conjunction is commutative; that is, `prob(A & B)` is always equal to `prob(B & A)`.
But conditional probability is *not* commutative; that is, `conditional(A, B)` is not the same as `conditional(B, A)`.
That should be clear if we look at an example. Previously, we computed the probability a respondent is female, given that they are banker.
```
conditional(female, given=banker)
```
The result shows that the majority of bankers are female. That is not the same as the probability that a respondent is a banker, given that they are female:
```
conditional(banker, given=female)
```
Only about 2% of female respondents are bankers.
I hope this example makes it clear that conditional probability is not commutative, and maybe it was already clear to you. Nevertheless, it is a common error to confuse `conditional(A, B)` and `conditional(B, A)`. We'll see some examples later.
## Condition and Conjunction
We can combine conditional probability and conjunction. For example, here's the probability a respondent is female, given that they are a liberal Democrat.
```
conditional(female, given=liberal & democrat)
```
About 57% of liberal Democrats are female.
And here's the probability they are a liberal female, given that they are a banker:
```
conditional(liberal & female, given=banker)
```
About 17% of bankers are liberal women.
## Laws of Probability
In the next few sections, we'll derive three relationships between conjunction and conditional probability:
* Theorem 1: Using a conjunction to compute a conditional probability.
* Theorem 2: Using a conditional probability to compute a conjunction.
* Theorem 3: Using `conditional(A, B)` to compute `conditional(B, A)`.
Theorem 3 is also known as Bayes's Theorem.
I'll write these theorems using mathematical notation for probability:
* $P(A)$ is the probability of proposition $A$.
* $P(A~\mathrm{and}~B)$ is the probability of the conjunction of $A$ and $B$, that is, the probability that both are true.
* $P(A | B)$ is the conditional probability of $A$ given that $B$ is true. The vertical line between $A$ and $B$ is pronounced "given".
With that, we are ready for Theorem 1.
### Theorem 1
What fraction of bankers are female? We have already seen one way to compute the answer:
1. Use the bracket operator to select the bankers, then
2. Use `mean` to compute the fraction of bankers who are female.
We can write these steps like this:
```
female[banker].mean()
```
Or we can use the `conditional` function, which does the same thing:
```
conditional(female, given=banker)
```
But there is another way to compute this conditional probability, by computing the ratio of two probabilities:
1. The fraction of respondents who are female bankers, and
2. The fraction of respondents who are bankers.
In other words: of all the bankers, what fraction are female bankers?
Here's how we compute this ratio.
```
prob(female & banker) / prob(banker)
```
The result is the same. This example demonstrates a general rule that relates conditional probability and conjunction. Here's what it looks like in math notation:
$$P(A|B) = \frac{P(A~\mathrm{and}~B)}{P(B)}$$
And that's Theorem 1.
### Theorem 2
If we start with Theorem 1 and multiply both sides by $P(B)$, we get Theorem 2.
$$P(A~\mathrm{and}~B) = P(B) ~ P(A|B)$$
This formula suggests a second way to compute a conjunction: instead of using the `&` operator, we can compute the product of two probabilities.
Let's see if it works for `liberal` and `democrat`. Here's the result using `&`:
```
prob(liberal & democrat)
```
And here's the result using Theorem 2:
```
prob(democrat) * conditional(liberal, democrat)
```
They are the same.
### Theorem 3
We have established that conjunction is commutative. In math notation, that means:
$$P(A~\mathrm{and}~B) = P(B~\mathrm{and}~A)$$
If we apply Theorem 2 to both sides, we have
$$P(B) P(A|B) = P(A) P(B|A)$$
Here's one way to interpret that: if you want to check $A$ and $B$, you can do it in either order:
1. You can check $B$ first, then $A$ conditioned on $B$, or
2. You can check $A$ first, then $B$ conditioned on $A$.
If we divide through by $P(B)$, we get Theorem 3:
$$P(A|B) = \frac{P(A) P(B|A)}{P(B)}$$
And that, my friends, is Bayes's Theorem.
To see how it works, let's compute the fraction of bankers who are liberal, first using `conditional`:
```
conditional(liberal, given=banker)
```
Now using Bayes's Theorem:
```
prob(liberal) * conditional(banker, liberal) / prob(banker)
```
They are the same.
### The Law of Total Probability
In addition to these three theorems, there's one more thing we'll need to do Bayesian statistics: the law of total probability.
Here's one form of the law, expressed in mathematical notation:
$$P(A) = P(B_1~\mathrm{and}~A) + P(B_2~\mathrm{and}~A)$$
In words, the total probability of $A$ is the sum of two possibilities: either $B_1$ and $A$ are true or $B_2$ and $A$ are true.
But this law applies only if $B_1$ and $B_2$ are:
* Mutually exclusive, which means that only one of them can be true, and
* Collectively exhaustive, which means that one of them must be true.
As an example, let's use this law to compute the probability that a respondent is a banker.
We can compute it directly like this:
```
prob(banker)
```
So let's confirm that we get the same thing if we compute male and female bankers separately.
In this dataset all respondents are designated male or female. Recently, the GSS Board of Overseers announced that they will add more inclusive gender questions to the survey (you can read more about this issue, and their decision, at https://gender.stanford.edu/news-publications/gender-news/more-inclusive-gender-questions-added-general-social-survey).
We already have a Boolean `Series` that is `True` for female respondents.
Here's the complementary `Series` for male respondents.
```
male = (gss['sex'] == 1)  # complement of `female` under the 1/2 sex encoding
```
Now we can compute the total probability of `banker` like this.
```
prob(male & banker) + prob(female & banker)
```
Because `male` and `female` are mutually exclusive and collectively exhaustive (MECE), we get the same result we got by computing the probability of `banker` directly.
Applying Theorem 2, we can also write the law of total probability like this:
$$P(A) = P(B_1) P(A|B_1) + P(B_2) P(A|B_2)$$
And we can test it with the same example:
```
(prob(male) * conditional(banker, given=male) +
prob(female) * conditional(banker, given=female))
```
When there are more than two conditions, it is more concise to write the law of total probability as a summation:
$$P(A) = \sum_i P(B_i) P(A|B_i)$$
Again, this holds as long as the conditions, $B_i$ are mutually exclusive and collectively exhaustive.
As an example, let's consider `polviews`, which has seven different values.
```
# The seven polviews responses serve as the MECE conditions B_i in the
# law-of-total-probability example below.
B = gss['polviews']
B.value_counts().sort_index()
```
On this scale, `4.0` represents "Moderate".
So we can compute the probability of a moderate banker like this:
```
i = 4
prob(B==i) * conditional(banker, B==i)
```
And we can use `sum` and a [generator expression](https://www.johndcook.com/blog/2020/01/15/generator-expression/) to compute the summation.
```
sum(prob(B==i) * conditional(banker, B==i)
for i in range(1, 8))
```
The result is the same.
In this example, using the law of total probability is a lot more work than computing the probability directly, but it will turn out to be useful, I promise.
## Summary
Here's what we have so far:
**Theorem 1** gives us a way to compute a conditional probability using a conjunction:
$$P(A|B) = \frac{P(A~\mathrm{and}~B)}{P(B)}$$
**Theorem 2** gives us a way to compute a conjunction using a conditional probability:
$$P(A~\mathrm{and}~B) = P(B) P(A|B)$$
**Theorem 3**, also known as Bayes's Theorem, gives us a way to get from $P(A|B)$ to $P(B|A)$, or the other way around:
$$P(A|B) = \frac{P(A) P(B|A)}{P(B)}$$
**The Law of Total Probability** provides a way to compute probabilities by adding up the pieces:
$$P(A) = \sum_i P(B_i) P(A|B_i)$$
At this point you might ask, "So what?" If we have all of the data, we can compute any probability we want, any conjunction, or any conditional probability, just by counting. We don't have to use these formulas.
And you are right, *if* we have all of the data. But often we don't, and in that case, these formulas can be pretty useful -- especially Bayes's Theorem.
In the next chapter, we'll see how.
## Exercises
**Exercise:** Let's use the tools in this chapter to solve a variation of the Linda problem.
> Linda is 31 years old, single, outspoken, and very bright. She majored in philosophy. As a student, she was deeply concerned with issues of discrimination and social justice, and also participated in anti-nuclear demonstrations. Which is more probable?
> 1. Linda is a banker.
> 2. Linda is a banker and considers herself a liberal Democrat.
To answer this question, compute
* The probability that Linda is a female banker,
* The probability that Linda is a liberal female banker, and
* The probability that Linda is a liberal female banker and a Democrat.
```
# Solution goes here
# Solution goes here
# Solution goes here
```
**Exercise:** Use `conditional` to compute the following probabilities:
* What is the probability that a respondent is liberal, given that they are a Democrat?
* What is the probability that a respondent is a Democrat, given that they are liberal?
Think carefully about the order of the arguments you pass to `conditional`.
```
# Solution goes here
# Solution goes here
```
**Exercise:** There's a [famous quote](https://quoteinvestigator.com/2014/02/24/heart-head/) about young people, old people, liberals, and conservatives that goes something like:
> If you are not a liberal at 25, you have no heart. If you are not a conservative at 35, you have no brain.
Whether you agree with this proposition or not, it suggests some probabilities we can compute as an exercise.
Rather than use the specific ages 25 and 35, let's define `young` and `old` as under 30 or over 65:
```
# "Young" = under 30; per the text, roughly the 20th percentile of age.
young = (gss['age'] < 30)
prob(young)

# "Old" = 65 or older; roughly the 80th percentile.
old = (gss['age'] >= 65)
prob(old)
```
For these thresholds, I chose round numbers near the 20th and 80th percentiles. Depending on your age, you may or may not agree with these definitions of "young" and "old".
I'll define `conservative` as someone whose political views are "Conservative", "Slightly Conservative", or "Extremely Conservative".
```
# polviews 5-7: Slightly conservative, Conservative, Extremely conservative.
conservative = (gss['polviews'] >= 5)
prob(conservative)
```
Use `prob` and `conditional` to compute the following probabilities.
* What is the probability that a randomly chosen respondent is a young liberal?
* What is the probability that a young person is liberal?
* What fraction of respondents are old conservatives?
* What fraction of conservatives are old?
For each statement, think about whether it is expressing a conjunction, a conditional probability, or both.
For the conditional probabilities, be careful about the order of the arguments.
If your answer to the last question is greater than 30%, you have it backwards!
```
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
```
| github_jupyter |
<a href="https://colab.research.google.com/github/mowgli28/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments/blob/master/Copy_of_LS_DS_144_Real_world_Experiment_Design.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Lambda School Data Science Module 144
## Real-world Experiment Design

[Induction experiment, Wikipedia](https://commons.wikimedia.org/wiki/File:Induction_experiment.png)
## Prepare - Learn about JavaScript and Google Analytics
Python is great - but with web applications, it's impossible to avoid JavaScript. The lingua franca of the web, JavaScript runs in all browsers, and thus all front-end code must either be JS or transpiled to it. As a data scientist you don't have to learn JavaScript - but you do have to be aware of it, and being able to figure out snippets of it is an invaluable skill to connect your skills with real-world applications.
So, we leave the warm comfort of Python, and venture to a bigger world - check out the [LambdaSchool/AB-Demo repo](https://github.com/LambdaSchool/AB-Demo) and [live experiment](https://lambdaschool.github.io/AB-Demo/) before class.
Additionally, sign up for [Google Analytics](https://www.google.com/analytics) - if you're not sure on the steps or what "property" to give it, you can put a placeholder or wait until the live lecture. Google also has [Analytics documentation](https://support.google.com/analytics/) that is worth a look.
Note - if you use any of the various tracker blocking techniques, it's quite likely you won't show up in Google Analytics. You'll have to disable them to be able to fully test your experiment.
## Live Lecture - Using Google Analytics with a live A/B test
Again we won't do much Python here, but we'll put a few notes and results in the notebook as we go.
## Assignment - Set up your own A/B test!
For a baseline, a straight fork of the Lambda School repo is OK. Getting that working with your own Analytics profile is already a task. But if you get through that, stretch goals:
1. Explore Google Analytics - it's big and changes frequently, but powerful (can track conversions and events, flows, etc.)
2. Customize the experiment to be more interesting/different (try colors!)
3. Check out the various tools for setting up A/B experiments (e.g. [Optimizely](https://www.optimizely.com/) and [alternatives](https://alternativeto.net/software/optimizely/))
4. Try to get enough traffic to actually have more real data (don't spam people, but do share with friends)
5. If you do get more traffic, don't just apply a t-test - dig into the results and use both math and writing to describe your findings
```
https://mowgli28.github.io/AB-Demo/
```
| github_jupyter |
# 目標:利用openpose和神經網路辨識T-POSE和DAB姿勢。
# 組員:
# 應數一 108701011 游能澤
# 應數一 108701034 柯里橫
# 應數一 108701018 池欣霓
# 應數一 108701019 許辰宇
# 分工:
# 構想與建設環境:游能澤、柯里橫
# 程式建構:柯里橫
# 由於一直無法使用pyopenpose函式,無法透過
# openpose取得樣本,只好找別人做好的。
# 到https://github.com/burningion/dab-and-tpose-controlled-lights/tree/master/data
# 載下作者生成好的數據,開個data資料夾放進去
# dabs.npy tposes.npy other.npy
# more-dabs.npy more-tposes.npy more-other.npy
# test-dabs.npy
```
import numpy as np
```
# 這幾個檔案是以numpy儲存的二進制文件
# 裡頭有我們需要的特徵
```
# Load the pre-generated OpenPose keypoint samples (one .npy file per pose class).
dabDataset = np.load('data/dabs.npy')
tposeDataset = np.load('data/tposes.npy')
otherDataset = np.load('data/other.npy')
# Peek at the data: each sample appears to be a (25, 3) array of BODY_25
# keypoints as (x, y, confidence) -- confirm with the .shape outputs below.
dabDataset[0]
dabDataset.shape
dabDataset[0].shape
```
# Adding our Labels
# Our labels come from the [BODY_25 Pose Output format](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/output.md#pose-output-format-body_25) available at the repo.
```
# Names of the 25 BODY_25 keypoints plus "Background", in OpenPose output order.
labels = ["Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow",
          "LWrist", "MidHip", "RHip", "RKnee", "RAnkle", "LHip", "LKnee", "LAnkle",
          "REye", "LEye", "REar", "LEar", "LBigToe", "LSmallToe", "LHeel", "RBigToe",
          "RSmallToe", "RHeel", "Background"]
```
## 看數據有三個維度分別是X、Y、Confidence,不需要用到Confidence。
```
# Build per-column header names: one X/Y/Confidence triple per keypoint.
properLabels = []
for label in labels:
    properLabels.append(label + 'X')
    properLabels.append(label + 'Y')
    properLabels.append(label + 'Confidence')
import csv
# Dump each dataset to CSV: a header row, then one flattened sample per row.
with open('data/dabs.csv', 'w+') as dabcsv:
    dabwriter = csv.writer(dabcsv, delimiter=',')
    dabwriter.writerow(properLabels)
    for cell in dabDataset:
        dabwriter.writerow(cell.flatten())
with open('data/tposes.csv', 'w+') as tposecsv:
    tposewriter = csv.writer(tposecsv, delimiter=',')
    tposewriter.writerow(properLabels)
    for cell in tposeDataset:
        tposewriter.writerow(cell.flatten())
with open('data/other.csv', 'w+') as othercsv:
    otherwriter = csv.writer(othercsv, delimiter=',')
    otherwriter.writerow(properLabels)
    for cell in otherDataset:
        otherwriter.writerow(cell.flatten())
```
# 用CSV檔看訓練資料略少,但還是試著訓練看看。
# Creating a Labeled Dataset for Training and Testing
# We'll use 0 for other poses, 1 for dabs, and 2 for tposes.
```
# Class labels: 0 = other, 1 = dab, 2 = tpose (must match the stacking order below).
labels = np.zeros(len(otherDataset))
labels = np.append(labels, np.full((len(dabDataset)), 1))
labels = np.append(labels, np.full((len(tposeDataset)), 2))
print(labels)
print("%i total examples for training." % len(labels))
# Stack the three datasets in the same order as the labels above.
dataset = np.append(otherDataset, dabDataset, axis=0)
dataset = np.append(dataset, tposeDataset, axis=0)
print(dataset)
dataset.shape
```
## 讓數值變成0~1之間
```
# NOTE(review): the result of this division is discarded -- nothing is assigned,
# so this cell does NOT normalize the data.  Proper in-place normalization
# (assigning back into the array) is done in a later cell.
dataset[:,:,1] / 1280
```
# Shuffle labels and features
```
# Shuffle features and labels together so their row pairing is preserved.
from sklearn.utils import shuffle
X, y = shuffle(dataset, labels)
# One-hot encode the 3 class labels (0=other, 1=dab, 2=tpose).
from keras.utils.np_utils import to_categorical
y = to_categorical(y, 3)
print(y.shape[1])  # number of classes
```
# 架設神經網路
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD
# Flatten each (25, 3) keypoint sample into a single 75-feature vector.
X = X.reshape(len(X), 75)
dataset[0]
# Simple fully-connected classifier: 75 inputs -> 128 -> 128 -> 3-way softmax.
model = Sequential()
model.add(Dense(128, activation = 'relu', input_shape = (75,)))
model.add(Dropout(0.5))
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(y.shape[1], activation = 'softmax'))
# NOTE(review): `lr` is the legacy Keras argument name; newer Keras uses
# `learning_rate` -- confirm against the installed version.
model.compile(optimizer = SGD(lr = 0.005),
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])
model.fit(X, y, epochs = 200, batch_size = 64)
```
# Cleaning up data further
## accuracy too low
```
# Re-shuffle and re-encode, this time actually normalizing the coordinates.
X, y = shuffle(dataset, labels)
y = to_categorical(y, 3)
print(X.shape)
# Scale the two coordinate channels into [0, 1]
# (assumes a 720x1280 frame -- TODO confirm the axis order).
X[:,:,0] = X[:,:,0] / 720
X[:,:,1] = X[:,:,1] / 1280
# Drop the confidence channel, keeping only (x, y) -> 25 joints * 2 = 50 features.
X = X[:,:,:2]
print(X.shape)
# NOTE(review): 56 is the hard-coded sample count; len(X) would be safer here.
X = X.reshape(56, 50)
print(X.shape)
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(50,)))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=200,batch_size=64)
```
# Adding More Data and Beginning Data Augmentation
```
# Load the additional ("more-") samples and preprocess them the same way.
dabDataset = np.load('data/more-dabs.npy')
tposeDataset = np.load('data/more-tposes.npy')
otherDataset = np.load('data/more-other.npy')
# Labels again: 0 = other, 1 = dab, 2 = tpose.
labels1 = np.zeros(len(otherDataset))
labels1 = np.append(labels1, np.full((len(dabDataset)), 1))
labels1 = np.append(labels1, np.full((len(tposeDataset)), 2))
print(labels1)
print("%i total new samples" % len(labels1))
dataset1 = np.append(otherDataset, dabDataset, axis=0)
dataset1 = np.append(dataset1, tposeDataset, axis=0)
X1, y1 = shuffle(dataset1, labels1)
y1 = to_categorical(y1, 3)
print(X1.shape)
# Same normalization as before: scale coordinates, drop confidence, flatten.
X1[:,:,0] = X1[:,:,0] / 720
X1[:,:,1] = X1[:,:,1] / 1280
X1 = X1[:,:,:2]
print(X1.shape)
X1 = X1.reshape(len(X1), 50)
print(X1.shape)
model = Sequential()
model.add(Dense(128, activation = 'relu', input_shape = (50,)))
model.add(Dropout(0.5))
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
# NOTE(review): this reuses y from the previous cell for the class count; the
# value (3) matches y1, but y1.shape[1] would be the intended variable.
model.add(Dense(y.shape[1], activation = 'softmax'))
model.compile(optimizer = 'Adam',
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])
history = model.fit(X1, y1, epochs = 200, batch_size = 64)
# Evaluate against the earlier (X, y) set as a held-out check.
# NOTE(review): predict_classes was removed in newer Keras/TF releases --
# confirm the installed version supports it.
model.predict_classes(X)
X
y
model.test_on_batch(X, y)
# Persist the trained model, then reload it to verify the round trip.
model.save('data/dab-tpose-other.h5')
import keras
modello = keras.models.load_model('data/dab-tpose-other.h5')
# Preprocess the held-out test dabs identically and predict their classes.
dabDataset = np.load('data/test-dabs.npy')
dabDataset[:,:,0] = dabDataset[:,:,0] / 720
dabDataset[:,:,1] = dabDataset[:,:,1] / 1280
dabDataset = dabDataset[:,:,:2]
dabDataset = dabDataset.reshape(len(dabDataset), 50)
modello.predict_classes(dabDataset)
```
# 預測結果全是1,從前面我們設定的數字,1代表dabs,預測成功。
# 關於專案分工的部分,原先預計將資料前處理和使用模型交給另外兩位組員,但費盡了洪荒之力還是做不出來,最後放棄那些部分,只剩下訓練模型的部分。
# 作者在他自己的blog上發了這篇文章(https://www.makeartwithpython.com/blog/dab-and-tpose-controlled-lights/),裡頭提到了「All of the code, models, and training data are freely available on Github.」,可能在是否能公開發表上有模糊地帶,下次會注意授權問題,並且先寄信詢問。
# 模型是做好了,但卻無法使用,由於導入pyopenpose的不成功,無法在python裡呼叫openpose來取得測試資料,沒辦法實際應用。
| github_jupyter |
# High Molecular Weight Petroleum Pseudocomponents
Thermo is a general phase equilibrium engine, and if the user provides enough properties for the components, there is no issue adding your own components. In this basic example below, a made-up extended gas analysis is used to specify a gas consisting of the standard real components and three heavier fractions, C10+, C12+ and C15+.
A bare minimum of basic properties are estimated using the Kesler-Lee method (1976), and the estimated fraction molecular weights are turned into atomic compositions. The heat capacities of each pseudocomponent is found with
the similarity variable concept of Lastovka and Shaw (2013) based on atomic composition.
This example ends with calculating a flash at 270 Kelvin and 1 bar.
```
from math import log, exp
import numpy as np
from scipy.constants import psi
from thermo import *
from chemicals import *
def Tc_Kesler_Lee_SG_Tb(SG, Tb):
    r'''Estimate the critical temperature of a hydrocarbon compound or
    petroleum fraction from its specific gravity and boiling point, using
    the Kesler-Lee (1976) correlation as presented by Ahmed (2007).

    .. math::
        T_c = 341.7 + 811.1SG + [0.4244 + 0.1174SG]T_b
        + \frac{[0.4669 - 3.26238SG]10^5}{T_b}

    Parameters
    ----------
    SG : float
        Specific gravity of the fluid at 60 degrees Fahrenheit [-]
    Tb : float
        Boiling point of the fluid [K]

    Returns
    -------
    Tc : float
        Estimated critical temperature [K]

    Notes
    -----
    The published correlation works in degrees Rankine, so the boiling
    point is converted first and the result converted back to Kelvin.

    Examples
    --------
    >>> Tc_Kesler_Lee_SG_Tb(0.7365, 365.555)
    545.0124354151242

    References
    ----------
    .. [1] Kesler, M. G., and B. I. Lee. "Improve Prediction of Enthalpy of
       Fractions." Hydrocarbon Processing (March 1976): 153-158.
    .. [2] Ahmed, Tarek H. Equations of State and PVT Analysis: Applications
       for Improved Reservoir Modeling. Gulf Pub., 2007.
    '''
    Tb_R = 9/5.*Tb  # Kelvin -> Rankine
    Tc_R = (341.7 + 811.1*SG
            + (0.4244 + 0.1174*SG)*Tb_R
            + ((0.4669 - 3.26238*SG)*1E5)/Tb_R)
    return 5/9.*Tc_R  # Rankine -> Kelvin
def Pc_Kesler_Lee_SG_Tb(SG, Tb):
    r'''Estimate the critical pressure of a hydrocarbon compound or
    petroleum fraction from its specific gravity and boiling point, using
    the Kesler-Lee (1976) correlation as presented by Ahmed (2007).

    .. math::
        \ln(P_c) = 8.3634 - \frac{0.0566}{SG} - \left[0.24244 + \frac{2.2898}
        {SG} + \frac{0.11857}{SG^2}\right]10^{-3}T_b
        + \left[1.4685 + \frac{3.648}{SG} + \frac{0.47227}{SG^2}\right]
        10^{-7}T_b^2-\left[0.42019 + \frac{1.6977}{SG^2}\right]10^{-10}T_b^3

    Parameters
    ----------
    SG : float
        Specific gravity of the fluid at 60 degrees Fahrenheit [-]
    Tb : float
        Boiling point of the fluid [K]

    Returns
    -------
    Pc : float
        Estimated critical pressure [Pa]

    Notes
    -----
    The published correlation works in degrees Rankine and psia; both
    conversions are handled here.

    Examples
    --------
    >>> Pc_Kesler_Lee_SG_Tb(0.7365, 365.555)
    3238323.346840464

    References
    ----------
    .. [1] Kesler, M. G., and B. I. Lee. "Improve Prediction of Enthalpy of
       Fractions." Hydrocarbon Processing (March 1976): 153-158.
    .. [2] Ahmed, Tarek H. Equations of State and PVT Analysis: Applications
       for Improved Reservoir Modeling. Gulf Pub., 2007.
    '''
    Tb_R = 9/5.*Tb  # Kelvin -> Rankine
    ln_Pc_psi = (8.3634 - 0.0566/SG
                 - (0.24244 + 2.2898/SG + 0.11857/SG**2)*1E-3*Tb_R
                 + (1.4685 + 3.648/SG + 0.47227/SG**2)*1E-7*Tb_R**2
                 - (0.42019 + 1.6977/SG**2)*1E-10*Tb_R**3)
    return exp(ln_Pc_psi)*psi  # psia -> Pa
def MW_Kesler_Lee_SG_Tb(SG, Tb):
    r'''Estimate the molecular weight of a hydrocarbon compound or
    petroleum fraction from its specific gravity and boiling point, using
    the Kesler-Lee (1976) correlation as presented by Ahmed (2007).

    .. math::
        MW = -12272.6 + 9486.4SG + [4.6523 - 3.3287SG]T_b + [1-0.77084SG
        - 0.02058SG^2]\left[1.3437 - \frac{720.79}{T_b}\right]\frac{10^7}{T_b}
        + [1-0.80882SG + 0.02226SG^2][1.8828 - \frac{181.98}{T_b}]
        \frac{10^{12}}{T_b^3}

    Parameters
    ----------
    SG : float
        Specific gravity of the fluid at 60 degrees Fahrenheit [-]
    Tb : float
        Boiling point of the fluid [K]

    Returns
    -------
    MW : float
        Estimated molecular weight [g/mol]

    Notes
    -----
    The published correlation works in degrees Rankine.

    Examples
    --------
    >>> MW_Kesler_Lee_SG_Tb(0.7365, 365.555)
    98.70887589833501

    References
    ----------
    .. [1] Kesler, M. G., and B. I. Lee. "Improve Prediction of Enthalpy of
       Fractions." Hydrocarbon Processing (March 1976): 153-158.
    .. [2] Ahmed, Tarek H. Equations of State and PVT Analysis: Applications
       for Improved Reservoir Modeling. Gulf Pub., 2007.
    '''
    Tb_R = 9/5.*Tb  # Kelvin -> Rankine
    # Three additive contributions of the correlation, summed in order.
    linear_part = -12272.6 + 9486.4*SG + (4.6523 - 3.3287*SG)*Tb_R
    inv_Tb_part = (1.-0.77084*SG - 0.02058*SG**2)*(1.3437 - 720.79/Tb_R)*1E7/Tb_R
    inv_Tb3_part = (1.-0.80882*SG + 0.02226*SG**2)*(1.8828 - 181.98/Tb_R)*1E12/Tb_R**3
    return linear_part + inv_Tb_part + inv_Tb3_part
def omega_Kesler_Lee_SG_Tb_Tc_Pc(SG, Tb, Tc=None, Pc=None):
    r'''Estimate the acentric factor of a hydrocarbon compound or petroleum
    fraction from its specific gravity and boiling point, using the
    Kesler-Lee (1976) correlation as presented by Ahmed (2007).  If Tc and
    Pc are provided, the Kesler-Lee routines for estimating them are not
    used.

    For Tbr > 0.8:

    .. math::
        \omega = -7.904 + 0.1352K - 0.007465K^2 + 8.359T_{br}
        + ([1.408-0.01063K]/T_{br})

    Otherwise:

    .. math::
        \omega = \frac{-\ln\frac{P_c}{14.7} - 5.92714 + \frac{6.09648}{T_{br}}
        + 1.28862\ln T_{br} - 0.169347T_{br}^6}{15.2518 - \frac{15.6875}{T_{br}}
        - 13.4721\ln T_{br} + 0.43577T_{br}^6}

        K = \frac{T_b^{1/3}}{SG}

        T_{br} = \frac{T_b}{T_c}

    Parameters
    ----------
    SG : float
        Specific gravity of the fluid at 60 degrees Fahrenheit [-]
    Tb : float
        Boiling point of the fluid [K]
    Tc : float, optional
        Critical temperature [K]
    Pc : float, optional
        Critical pressure [Pa]

    Returns
    -------
    omega : float
        Acentric factor [-]

    Notes
    -----
    The published correlation works in degrees Rankine; Pc is divided by
    one standard atmosphere in Pa, matching the 14.7 psia of the original.

    Examples
    --------
    >>> omega_Kesler_Lee_SG_Tb_Tc_Pc(0.7365, 365.555, 545.012, 3238323.)
    0.306392118159797

    References
    ----------
    .. [1] Kesler, M. G., and B. I. Lee. "Improve Prediction of Enthalpy of
       Fractions." Hydrocarbon Processing (March 1976): 153-158.
    .. [2] Ahmed, Tarek H. Equations of State and PVT Analysis: Applications
       for Improved Reservoir Modeling. Gulf Pub., 2007.
    '''
    # Fill in any missing critical properties with the companion correlations.
    if Tc is None:
        Tc = Tc_Kesler_Lee_SG_Tb(SG, Tb)
    if Pc is None:
        Pc = Pc_Kesler_Lee_SG_Tb(SG, Tb)
    Tb_R = 9/5.*Tb  # Kelvin -> Rankine
    Tc_R = 9/5.*Tc  # Kelvin -> Rankine
    K = Tb_R**(1/3.)/SG  # Watson characterization factor
    Tbr = Tb_R/Tc_R      # reduced boiling point
    if Tbr > 0.8:
        return (-7.904 + 0.1352*K - 0.007465*K**2 + 8.359*Tbr
                + ((1.408-0.01063*K)/Tbr))
    numerator = (-log(Pc/101325.) - 5.92714 + 6.09648/Tbr + 1.28862*log(Tbr)
                 - 0.169347*Tbr**6)
    denominator = 15.2518 - 15.6875/Tbr - 13.4721*log(Tbr) + 0.43577*Tbr**6
    return numerator/denominator
# Basic composition and names. All pure component properties are obtained from Chemicals and Thermo.
pure_constants = ChemicalConstantsPackage.constants_from_IDs(
    ['water', 'hydrogen', 'helium', 'nitrogen', 'carbon dioxide', 'hydrogen sulfide', 'methane',
     'ethane', 'propane', 'isobutane', 'n-butane', 'isopentane', 'n-pentane', 'hexane',
     'heptane', 'octane', 'nonane'])
pure_fractions = [.02, .00005, .00018, .009, .02, .002, .82, .08, .031,
                  .009, .0035, .0033, .0003, .0007, .0004, .00005, .00002]
# Three heavy pseudocomponent cuts characterized by carbon number, SG and Tb.
pseudo_names = ['C10-C11', 'C12-C14', 'C15+']
pseudo_carbon_numbers = [10.35, 12.5, 16.9]
pseudo_SGs = [.73, .76, .775] # Specific gravity values are based off the alkane series
pseudo_Tbs = [447, 526, 589]  # boiling points [K]
# Using the estimation methods defined earlier, we obtain some critical properties
pseudo_Tcs = [Tc_Kesler_Lee_SG_Tb(SG, Tb) for SG, Tb in zip(pseudo_SGs, pseudo_Tbs)]
pseudo_Pcs = [Pc_Kesler_Lee_SG_Tb(SG, Tb) for SG, Tb in zip(pseudo_SGs, pseudo_Tbs)]
pseudo_MWs = [MW_Kesler_Lee_SG_Tb(SG, Tb) for SG, Tb in zip(pseudo_SGs, pseudo_Tbs)]
pseudo_omegas = [omega_Kesler_Lee_SG_Tb_Tc_Pc(SG, Tb) for SG, Tb in zip(pseudo_SGs, pseudo_Tbs)]
# Estimate the hydrogen counts: whatever mass is not carbon is assumed hydrogen
hydrogen_counts = [(MW - C*periodic_table.C.MW)/periodic_table.H.MW
                   for C, MW in zip(pseudo_carbon_numbers, pseudo_MWs)]
# Get the atomic compositions
pseudo_atoms = [{'C': C, 'H': H} for C, H in zip(pseudo_carbon_numbers, hydrogen_counts)]
# Calculate the similarity variable of each species (Lastovka-Shaw Cp input)
similarity_variables = [similarity_variable(atoms=atoms) for atoms in pseudo_atoms]
pseudo_fractions = [.0003, .00015, .00005]
pseudos = ChemicalConstantsPackage(names=pseudo_names, MWs=pseudo_MWs, Tbs=pseudo_Tbs,
                                   atomss=pseudo_atoms,
                                   Tcs=pseudo_Tcs, Pcs=pseudo_Pcs, omegas=pseudo_omegas,
                                   similarity_variables=similarity_variables)
# Add the pure components and the pseudocomponents to create a new package of constant values
# which will be used by the phase and flash objects
constants = pure_constants + pseudos
# Obtain the temperature and pressure dependent objects
properties = PropertyCorrelationsPackage(constants=constants)
# This is the feed composition
zs = normalize(pure_fractions + pseudo_fractions)
T = 270 # K
P = 1e5 # Pa (= 1 bar)
kijs = np.zeros((constants.N, constants.N)).tolist() # kijs left as zero in this example
eos_kwargs = dict(Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas, kijs=kijs)
# The API SRK equation of state is used, but other cubic equations of state can be used instead
gas = CEOSGas(APISRKMIX, eos_kwargs, HeatCapacityGases=properties.HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(APISRKMIX, eos_kwargs, HeatCapacityGases=properties.HeatCapacityGases, T=T, P=P, zs=zs)
liq2 = CEOSLiquid(APISRKMIX, eos_kwargs, HeatCapacityGases=properties.HeatCapacityGases, T=T, P=P, zs=zs)
# NOTE(review): phase_list is never used below (FlashVLN receives gas/liq/liq2
# directly), and it repeats liq where liq2 was presumably intended.
phase_list = [gas, liq, liq]
# Set up the three phase flash engine
flashN = FlashVLN(constants, properties, liquids=[liq, liq2], gas=gas)
# Do the flash, and get some properties
res = flashN.flash(T=T, P=P, zs=zs)
res.phase_count, res.gas_beta, res.liquids_betas
res.H(), res.Cp_mass(), res.MW(), res.gas.mu(), res.gas.k()
res.heaviest_liquid.rho_mass(), res.lightest_liquid.rho_mass()
```
| github_jupyter |
To participate, you'll need to git clone (or download the .zip from GitHub):
https://github.com/mbeyeler/2018-neurohack-skimage
You can do that in git using:
git clone https://github.com/mbeyeler/2018-neurohack-skimage
If you have already cloned the material, please issue `git pull` now and reload the notebook to ensure that you have the latest updates.
# Tutorial 1: Image Manipulation
This tutorial was adapted from https://github.com/scikit-image/skimage-tutorials/blob/master/lectures/00_images_are_arrays.ipynb.
```
%matplotlib inline
```
## Images are NumPy arrays
Images are represented in ``scikit-image`` using standard ``numpy`` arrays. This allows maximum inter-operability with other libraries in the scientific Python ecosystem, such as ``matplotlib`` and ``scipy``.
Let's see how to build a grayscale image as a 2D array:
```
import numpy as np
from matplotlib import pyplot as plt
# A grayscale image is just a 2-D array: here, 500x500 random floats in [0, 1).
random_image = np.random.random([500, 500])
plt.imshow(random_image, cmap='gray')
plt.colorbar();  # trailing ';' suppresses the text repr in the notebook
```
The same holds for "real-world" images:
```
from skimage import data
# Load a bundled sample image; it is a plain numpy array.
coins = data.coins()
print('Type:', type(coins))
print('dtype:', coins.dtype)
print('shape:', coins.shape)
plt.imshow(coins, cmap='gray');
```
A color image is a 3D array, where the last dimension has size 3 and represents the red, green, and blue channels:
```
# A color image is a 3-D array: (row, column, channel) with channels R, G, B.
cat = data.chelsea()
print("Shape:", cat.shape)
print("Values min/max:", cat.min(), cat.max())
plt.imshow(cat);
```
These are *just NumPy arrays*. E.g., we can make a red square by using standard array slicing and manipulation:
```
# Standard array slicing works on images: paint a 100x100 square pure red.
cat[10:110, 10:110, :] = [255, 0, 0]  # [red, green, blue]
plt.imshow(cat);
```
Images can also include transparent regions by adding a 4th dimension, called an *alpha layer*.
### Other shapes, and their meanings
|Image type|Coordinates|
|:---|:---|
|2D grayscale|(row, column)|
|2D multichannel|(row, column, channel)|
|3D grayscale (or volumetric) |(plane, row, column)|
|3D multichannel|(plane, row, column, channel)|
### Data types and image values
In literature, one finds different conventions for representing image values:
```
0 - 255 where 0 is black, 255 is white
0 - 1 where 0 is black, 1 is white
```
``scikit-image`` supports both conventions--the choice is determined by the
data-type of the array.
E.g., here, I generate two valid images:
## Displaying images using matplotlib
```
from skimage import data
img0 = data.chelsea()
img1 = data.rocket()
import matplotlib.pyplot as plt
# Two side-by-side axes on one figure.
f, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20, 10))
ax0.imshow(img0)
ax0.set_title('Cat', fontsize=18)
ax0.axis('off')
ax1.imshow(img1)
ax1.set_title('Rocket', fontsize=18)
ax1.set_xlabel(r'Launching position $\alpha=320$')
# Overlay vertical annotation lines spanning the full image height.
ax1.vlines([202, 450], 0, img1.shape[0], colors='white', linewidth=3, label='Side tower position')
ax1.legend();
```
## Drawing
```
from skimage import draw
# Draw a circle with radius 50 at (200, 150):
# NOTE(review): draw.circle was deprecated and later removed from scikit-image
# (replaced by draw.disk) -- confirm the installed version still provides it.
r, c = draw.circle(200, 150, 50)
# Change only the green channel:
img1[r, c, 1] = 255
plt.imshow(img1)
```
For more on plotting, see the [Matplotlib documentation](https://matplotlib.org/gallery/index.html#images-contours-and-fields) and [pyplot API](https://matplotlib.org/api/pyplot_summary.html).
## Image I/O
Mostly, we won't be using input images from the scikit-image example data sets. Those images are typically stored in JPEG or PNG format. Since scikit-image operates on NumPy arrays, *any* image reader library that provides arrays will do. Options include imageio, matplotlib, pillow, etc.
scikit-image conveniently wraps many of these in the `io` submodule, and will use whichever of the libraries mentioned above are installed:
```
from skimage import io
# io.imread delegates to whichever backend (imageio, pillow, ...) is installed.
image = io.imread('../img/skimage-logo.png')
print(type(image))
print(image.dtype)
print(image.shape)
print(image.min(), image.max())
plt.imshow(image);
```
We also have the ability to load multiple images, or multi-layer TIFF images:
```
# Lazily load every JPEG in the folder as one collection.
ic = io.ImageCollection('../img/*.jpg')
print('Type:', type(ic))
ic.files
```
# Exercise: Visualizing RGB channels
Display the different color channels of the image along (each as a gray-scale image). Start with the following template:
```
# --- read in the image ---
image = io.imread('../img/skimage-logo.png')
# --- assign each color channel to a different variable ---
# NOTE: the `...` below are the exercise placeholders -- the cell raises until
# they are replaced (each channel is a 2-D slice along the last axis of image).
r = ...
g = ...
b = ...
# --- display the image and r, g, b channels ---
f, axes = plt.subplots(1, 4, figsize=(16, 5))
for ax in axes:
    ax.axis('off')
(ax_r, ax_g, ax_b, ax_color) = axes
ax_r.imshow(r, cmap='gray')
ax_r.set_title('red channel')
ax_g.imshow(g, cmap='gray')
ax_g.set_title('green channel')
ax_b.imshow(b, cmap='gray')
ax_b.set_title('blue channel')
# --- Here, we stack the R, G, and B layers again
#     to form a color image ---
ax_color.imshow(np.stack([r, g, b], axis=2))
ax_color.set_title('all channels');
```
Now, take a look at the following R, G, and B channels. How would their combination look? (Write some code to confirm your intuition.)
| github_jupyter |
Grade 5/7
No implementation of Cash-Karp coefficients.
The adaptive step-size control does not follow the Cash-Karp algorithm.
## Create a notebook to perform Runge-Kutta integration for multiple coupled variables.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
```
This cell and the one following it are not a requirement, it is only for looks
```
#use colors.subclass(or command; e.g bold).colorname to print
#examples: print(colors.bold, colors.fg.blue, "this will be bold and blue")
#everything after this will have that format until the following command
#is given: print(colors.reset, "now, this text will be normal")
class colors:
    """ANSI escape-code constants for styling terminal/notebook output."""
    reset='\033[0m' #reset all colors with colors.reset
    bold='\033[01m'
    underline='\033[04m'
    strikethrough='\033[09m'
    reverse='\033[07m'
    class fg: #foreground subclass
        black='\033[30m'
        red='\033[31m'
        green='\033[32m'
        orange='\033[33m'
        blue='\033[34m'
        purple='\033[35m'
        cyan='\033[36m'
        lightgrey='\033[37m'
        darkgrey='\033[90m'
        lightred='\033[91m'
        lightgreen='\033[92m'
        yellow='\033[93m'
        lightblue='\033[94m'
        pink='\033[95m'
        lightcyan='\033[96m'
    class bg: #background subclass
        black='\033[40m'
        red='\033[41m'
        green='\033[42m'
        orange='\033[43m'
        blue='\033[44m'
        purple='\033[45m'
        cyan='\033[46m'
        lightgrey='\033[47m'
The above code was provided by https://www.geeksforgeeks.org/print-colors-python-terminal/
### Define our coupled derivatives to integrate
```
def dydx(x, y):
    """Coupled derivatives for simple harmonic motion.

    With y[0] = y and y[1] = z, the system is
        dy/dx = z
        dz/dx = -y

    Parameters
    ----------
    x : float
        Independent variable (unused, kept for the RK4 call signature).
    y : array_like, shape (2,)
        Current state [y, z].

    Returns
    -------
    numpy.ndarray, shape (2,)
        The derivatives [dy/dx, dz/dx] as a float array.
    """
    return np.array([y[1], -1*y[0]], dtype=float)
```
### Define the 4th order RK method
```
def rk4_mv_core(dydx, xi, yi, nv, h):
    """Advance a system of coupled ODEs by one classic 4th-order RK step.

    Parameters
    ----------
    dydx : callable
        dydx(x, y) -> array of the derivatives at (x, y).
    xi : float
        Current value of the independent variable.
    yi : numpy.ndarray, shape (nv,)
        Current state vector.
    nv : int
        Number of coupled equations (retained for interface compatibility;
        the array operations infer the size).
    h : float
        Step size.

    Returns
    -------
    numpy.ndarray, shape (nv,)
        The state advanced to x = xi + h.
    """
    x_mid = xi + 0.5*h  # midpoint of the step
    x_end = xi + h      # end of the step

    # The four RK4 slope estimates, each already scaled by h.
    k1 = h*dydx(xi, yi)
    k2 = h*dydx(x_mid, yi + 0.5*k1)
    k3 = h*dydx(x_mid, yi + 0.5*k2)
    k4 = h*dydx(x_end, yi + k3)

    # Weighted average of the slopes gives the update.
    return yi + (k1 + 2*k2 + 2*k3 + k4)/6.
```
### Define an adaptive step size driver for RK4
```
def rk4_mv_ad(dydx, x_i, y_i, nv, h, tol):
    """Take one adaptive RK4 step, shrinking h until the error is within tol.

    The error is estimated by step doubling: one full step of size h is
    compared against two consecutive half steps from the same start point.
    NOTE(review): this is plain step doubling, not an embedded (Cash-Karp)
    error estimator.

    Returns
    -------
    tuple
        (y_new, h_new, h_step): the state after the accepted full step, a
        suggested size for the next step, and the step actually taken.
    """
    SAFETY = 0.9      # shrink slightly more than the error estimate suggests
    H_NEW_FAC = 2.0   # never grow the step by more than this factor
    imax = 10000      # give up after this many shrink attempts
    attempt = 0

    # Seed the error so the loop body always executes at least once.
    Delta = np.full(nv, 2*tol)
    h_step = h

    while(Delta.max()/tol > 1.0):
        # One full step vs. two half steps from the same starting point.
        y_full = rk4_mv_core(dydx, x_i, y_i, nv, h_step)
        y_half = rk4_mv_core(dydx, x_i, y_i, nv, 0.5*h_step)
        y_two_halves = rk4_mv_core(dydx, x_i + 0.5*h_step, y_half, nv, 0.5*h_step)

        # Per-component error estimate.
        Delta = np.fabs(y_full - y_two_halves)

        if(Delta.max()/tol > 1.0):
            # Error too large: shrink the step and try again.
            h_step *= SAFETY * (Delta.max()/tol)**(-0.25)

        if(attempt >= imax):
            print("Too many iterations in rk4_mv_ad()")
            raise StopIteration("Ending after i = ", attempt)
        attempt += 1

    # The step succeeded: propose a (capped) larger step for next time.
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)

    return y_full, h_new, h_step
```
## Define a wrapper for RK4
```
def rk4_mv(dydx,a,b,y_a,tol):
    """Integrate a coupled ODE system over [a, b] with adaptive RK4 steps.

    dydx : callable returning the derivative array for (x, y)
    a, b : lower and upper integration bounds
    y_a  : array of initial conditions at x = a
    tol  : per-step error tolerance forwarded to rk4_mv_ad()

    Returns the arrays (x, y) of all accepted steps including both
    endpoints; y has shape (len(x), len(y_a)).
    """
    #dfdx is derivative w.r.t. x
    #a is lower bound
    #b is upper bound
    #y_a are boundary conditions
    #tol is tolerance for integrating y
    #define our starting step
    xi = a
    yi = y_a.copy()
    #an initial step size == make very small
    h = 1.0e-4 * (b-a)
    #set max number of iterations
    imax = 10000
    #set an iteration variable
    i = 0
    #set the number of coupled ODEs to the size of y_a
    nv = len(y_a)
    #set the initial conditions (arrays grow by one row per accepted step)
    x = np.full(1,a)
    y = np.full((1,nv),y_a)
    #set a flag
    flag = 1
    #loop until we reach the right side
    while(flag):
        #calculate y_i+1 (also returns the suggested and the taken step sizes)
        yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
        #update the step
        h = h_new
        #prevent an overshoot past b
        if(xi+h_step>b):
            #take a smaller step that lands exactly on b
            h = b-xi
            #recalculate y_i+1 with the clamped step
            yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
            #break after recording this final step
            flag = 0
        #update values
        xi += h_step
        yi[:] = yi_new[:]
        #add the step to the arrays: copy the old rows, append the new state
        x = np.append(x,xi)
        y_new = np.zeros((len(x),nv))
        y_new[0:len(x)-1,:] = y
        y_new[-1,:] = yi[:]
        del y
        y = y_new
        #prevent too many iterations
        if(i>=imax):
            print(colors.bold, colors.fg.red, colors.bg.black, "Maximum iterations reached.", colors.reset)
            raise StopIteration("Iteration number = ",i)
        #iterate
        i += 1
        #output some information
        s = "i =%3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i,xi,h_step,b)
        print(s)
        #break if new xi is == b
        if(xi==b):
            flag = 0
    #return the answer
    print(colors.bold, colors.fg.purple, "Number of iterations:", i, colors.reset)
    return x,y
```
### Perform the integration
```
# Integrate y'' = -y over one full period [0, 2*pi].
a = 0.0
b = 2.0 * np.pi
# Initial conditions y(0) = 0, y'(0) = 1, so the analytic solution
# is y = sin(x) and dy/dx = cos(x).
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
```
## Plot the result
```
# Plot the numerical solution (points) against the analytic sin/cos (lines).
plt.plot(x,y[:,0],'o',color='y',label='y(x)')
plt.plot(x,y[:,1],'o',color='silver',label='dydx(x)')
xx = np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),color='k',label='sin(x)')
plt.plot(xx,np.cos(xx),color='purple',label='cos(x)')
plt.xlabel('x',color='chocolate',fontsize='large')
plt.ylabel('y, dy/dx',color='chocolate',fontsize='large')
plt.legend(frameon=False,fontsize='medium')
```
## Plot the error
Notice the errors will exceed our "tolerance".
```
# Deviation of the numerical solution from the analytic one at each step.
sin_X = np.sin(x)
cos_X = np.cos(x)
y_error = (y[:,0]-sin_X)
dydx_error = (y[:,1]-cos_X)
plt.plot(x, y_error, label="y(x) Error", color='fuchsia')
plt.plot(x, dydx_error, label="dydx(x) Error", color='aqua')
plt.legend(frameon=False, fontsize='medium')
```
| github_jupyter |
# Amazon SageMaker Multi-Model Endpoints using XGBoost
With [Amazon SageMaker multi-model endpoints](https://docs.aws.amazon.com/sagemaker/latest/dg/multi-model-endpoints.html), customers can create an endpoint that seamlessly hosts up to thousands of models. These endpoints are well suited to use cases where any one of a large number of models, which can be served from a common inference container to save inference costs, needs to be invokable on-demand and where it is acceptable for infrequently invoked models to incur some additional latency. For applications which require consistently low inference latency, an endpoint deploying a single model is still the best choice.
At a high level, Amazon SageMaker manages the loading and unloading of models for a multi-model endpoint, as they are needed. When an invocation request is made for a particular model, Amazon SageMaker routes the request to an instance assigned to that model, downloads the model artifacts from S3 onto that instance, and initiates loading of the model into the memory of the container. As soon as the loading is complete, Amazon SageMaker performs the requested invocation and returns the result. If the model is already loaded in memory on the selected instance, the downloading and loading steps are skipped and the invocation is performed immediately.
To demonstrate how multi-model endpoints are created and used, this notebook provides an example using a set of XGBoost models that each predict housing prices for a single location. This domain is used as a simple example to easily experiment with multi-model endpoints.
The Amazon SageMaker multi-model endpoint capability is designed to work with the MXNet, PyTorch and Scikit-Learn machine learning frameworks (TensorFlow coming soon), and the SageMaker XGBoost, KNN, and Linear Learner algorithms.
In addition, Amazon SageMaker multi-model endpoints are also designed to work with cases where you bring your own container that integrates with the multi-model server library. An example of this can be found [here](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/multi_model_bring_your_own) and documentation [here.](https://docs.aws.amazon.com/sagemaker/latest/dg/build-multi-model-build-container.html)
### Contents
1. [Generate synthetic data for housing models](#Generate-synthetic-data-for-housing-models)
1. [Train multiple house value prediction models](#Train-multiple-house-value-prediction-models)
1. [Create the Amazon SageMaker MultiDataModel entity](#Create-the-Amazon-SageMaker-MultiDataModel-entity)
1. [Create the Multi-Model Endpoint](#Create-the-multi-model-endpoint)
1. [Deploy the Multi-Model Endpoint](#deploy-the-multi-model-endpoint)
1. [Get Predictions from the endpoint](#Get-predictions-from-the-endpoint)
1. [Additional Information](#Additional-information)
1. [Clean up](#Clean-up)
# Generate synthetic data
The code below contains helper functions to generate synthetic data in the form of a `1x7` numpy array representing the features of a house.
The first entry in the array is the randomly generated price of a house. The remaining entries are the features (i.e. number of bedroom, square feet, number of bathrooms, etc.).
These functions will be used to generate synthetic data for training, validation, and testing. It will also allow us to submit synthetic payloads for inference to test our multi-model endpoint.
```
import numpy as np
import pandas as pd
import time
# Synthetic-data configuration.
NUM_HOUSES_PER_LOCATION = 1000  # samples generated per city
LOCATIONS = ['NewYork_NY', 'LosAngeles_CA', 'Chicago_IL', 'Houston_TX', 'Dallas_TX',
             'Phoenix_AZ', 'Philadelphia_PA', 'SanAntonio_TX', 'SanDiego_CA', 'SanFrancisco_CA']
PARALLEL_TRAINING_JOBS = 4 # len(LOCATIONS) if your account limits can handle it
MAX_YEAR = 2019  # reference year for the age-depreciation term in gen_price()
def gen_price(house, max_year=None):
    """Compute the synthetic price of a house from its features.

    The price is a linear function of the features: $150 per square foot,
    plus fixed premiums per bedroom, bathroom, lot acre and garage space,
    minus $5,000 of depreciation per year of age.

    Parameters
    ----------
    house : dict
        Must contain the keys 'SQUARE_FEET', 'NUM_BEDROOMS',
        'NUM_BATHROOMS', 'LOT_ACRES', 'GARAGE_SPACES' and 'YEAR_BUILT'.
    max_year : int, optional
        Reference year for the age depreciation.  Defaults to the
        module-level MAX_YEAR constant, preserving the original behavior;
        parameterized so the pricing rule is reusable and testable.

    Returns
    -------
    int
        The synthetic price in dollars.
    """
    if max_year is None:
        max_year = MAX_YEAR
    base_price = int(house['SQUARE_FEET'] * 150)
    price = int(base_price
                + (10000 * house['NUM_BEDROOMS'])
                + (15000 * house['NUM_BATHROOMS'])
                + (15000 * house['LOT_ACRES'])
                + (15000 * house['GARAGE_SPACES'])
                - (5000 * (max_year - house['YEAR_BUILT'])))
    return price
def gen_random_house():
    """Generate one synthetic house record.

    Features are drawn from simple distributions (normal for square
    footage, lot size and build year; uniform integers for bedrooms,
    bathrooms and garage spaces), then the price is derived
    deterministically from them via gen_price().

    Returns
    -------
    list
        [price, year_built, square_feet, num_bedrooms, num_bathrooms,
        lot_acres, garage_spaces]
    """
    # Draw order matches the original so identical RNG states give identical houses.
    features = {
        'SQUARE_FEET': int(np.random.normal(3000, 750)),
        'NUM_BEDROOMS': np.random.randint(2, 7),
        'NUM_BATHROOMS': np.random.randint(2, 7) / 2,
        'LOT_ACRES': round(np.random.normal(1.0, 0.25), 2),
        'GARAGE_SPACES': np.random.randint(0, 4),
        'YEAR_BUILT': min(MAX_YEAR, int(np.random.normal(1995, 10))),
    }
    return [gen_price(features),
            features['YEAR_BUILT'],
            features['SQUARE_FEET'],
            features['NUM_BEDROOMS'],
            features['NUM_BATHROOMS'],
            features['LOT_ACRES'],
            features['GARAGE_SPACES']]
def gen_houses(num_houses):
    """Build a DataFrame of synthetic houses.

    Parameters
    ----------
    num_houses : int
        Number of rows to generate.

    Returns
    -------
    pandas.DataFrame
        Columns: PRICE, YEAR_BUILT, SQUARE_FEET, NUM_BEDROOMS,
        NUM_BATHROOMS, LOT_ACRES, GARAGE_SPACES.
    """
    # Comprehension replaces the manual append loop (same rows, same order);
    # the unused loop index becomes '_'.
    house_rows = [gen_random_house() for _ in range(num_houses)]
    return pd.DataFrame(house_rows,
                        columns=['PRICE', 'YEAR_BUILT', 'SQUARE_FEET', 'NUM_BEDROOMS',
                                 'NUM_BATHROOMS', 'LOT_ACRES', 'GARAGE_SPACES'])
```
# Train multiple house value prediction models
In the follow section, we are setting up the code to train a house price prediction model for each of 4 different cities.
As such, we will launch multiple training jobs asynchronously, using the XGBoost algorithm.
In this notebook, we will be using the AWS Managed XGBoost Image for both training and inference - this image provides native support for launching multi-model endpoints.
```
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import image_uris
import boto3
from sklearn.model_selection import train_test_split
# Handles to S3 and the SageMaker session/role used throughout the notebook.
s3 = boto3.resource('s3')
sagemaker_session = sagemaker.Session()
role = get_execution_role()
BUCKET = sagemaker_session.default_bucket()
# This references the AWS managed XGBoost container image (supports MME natively).
XGBOOST_IMAGE = image_uris.retrieve(region=boto3.Session().region_name, framework='xgboost', version='1.0-1')
# Top-level S3 prefix for all training data and artifacts of this demo.
DATA_PREFIX = 'XGBOOST_BOSTON_HOUSING'
# Sub-prefix the multi-model endpoint will read model archives from.
MULTI_MODEL_ARTIFACTS = 'multi_model_artifacts'
TRAIN_INSTANCE_TYPE = 'ml.m4.xlarge'
ENDPOINT_INSTANCE_TYPE = 'ml.m4.xlarge'
ENDPOINT_NAME = 'mme-xgboost-housing'
MODEL_NAME = ENDPOINT_NAME
```
### Split a given dataset into train, validation, and test
The code below will generate 3 sets of data. 1 set to train, 1 set for validation and 1 for testing.
```
SEED = 7
# Fractions of the data used for train / validation / test respectively.
SPLIT_RATIOS = [0.6, 0.3, 0.1]

def split_data(df):
    """Split *df* into train/val/test ndarrays with the target in column 0.

    Parameters
    ----------
    df : pandas.DataFrame
        First column is the target (PRICE), remaining columns are features.

    Returns
    -------
    tuple of numpy.ndarray
        (train, val, test), each with the target re-attached as column 0,
        sized per SPLIT_RATIOS.
    """
    seed = SEED
    val_size = SPLIT_RATIOS[1]
    test_size = SPLIT_RATIOS[2]
    num_samples = df.shape[0]
    X1 = df.values[:num_samples, 1:]  # features only (skip target), all rows
    Y1 = df.values[:num_samples, :1]  # target only, all rows
    # First split off the combined validation+test pool from the training set.
    X_train, X_holdout, y_train, y_holdout = \
        train_test_split(X1, Y1, test_size=(test_size + val_size), random_state=seed)
    # Divide the hold-out pool into validation and test with the proper ratio.
    # BUG FIX: the original unpacked this split as
    #     X_test, X_test, y_test, y_test = train_test_split(...)
    # which left X_val/y_val equal to the WHOLE hold-out pool, so every test
    # row also appeared in the validation set (leakage into early stopping).
    X_val, X_test, y_val, y_test = \
        train_test_split(X_holdout, y_holdout,
                         test_size=(test_size / (test_size + val_size)),
                         random_state=seed)
    # Reassemble each set with the target in the first column.
    _train = np.concatenate([y_train, X_train], axis=1)
    _val = np.concatenate([y_val, X_val], axis=1)
    _test = np.concatenate([y_test, X_test], axis=1)
    return _train, _val, _test
```
### Launch a single training job for a given housing location
There is nothing specific to multi-model endpoints in terms of the models it will host. They are trained in the same way as all other SageMaker models. Here we are using the XGBoost estimator and not waiting for the job to complete.
```
def launch_training_job(location):
    """Launch an asynchronous XGBoost training job for one housing *location*.

    Uploads the locally prepared channel data to S3 (replacing any previous
    upload under the same prefix), then starts a SageMaker training job
    without waiting for completion.

    Parameters: location (str) -- city identifier, e.g. 'Chicago_IL'.
    Returns: sagemaker.estimator.Estimator for the launched job.
    """
    # clear out old versions of the data
    s3_bucket = s3.Bucket(BUCKET)
    full_input_prefix = f'{DATA_PREFIX}/model_prep/{location}'
    s3_bucket.objects.filter(Prefix=full_input_prefix + '/').delete()
    # upload the entire set of data for all three channels
    local_folder = f'data/{location}'
    inputs = sagemaker_session.upload_data(path=local_folder, key_prefix=full_input_prefix)
    print(f'Training data uploaded: {inputs}')
    _job = 'xgb-{}'.format(location.replace('_', '-'))
    full_output_prefix = f'{DATA_PREFIX}/model_artifacts/{location}'
    s3_output_path = f's3://{BUCKET}/{full_output_prefix}'
    xgb = sagemaker.estimator.Estimator(XGBOOST_IMAGE, role,
                                        instance_count=1, instance_type=TRAIN_INSTANCE_TYPE,
                                        output_path=s3_output_path, base_job_name=_job,
                                        sagemaker_session=sagemaker_session)
    # NOTE(review): 'silent' and objective 'reg:linear' are deprecated aliases
    # in newer XGBoost releases; the 1.0-1 container still accepts them.
    xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0,
                            early_stopping_rounds=5, objective='reg:linear', num_round=25)
    DISTRIBUTION_MODE = 'FullyReplicated'
    train_input = sagemaker.inputs.TrainingInput(s3_data=inputs+'/train',
                                                 distribution=DISTRIBUTION_MODE, content_type='csv')
    val_input = sagemaker.inputs.TrainingInput(s3_data=inputs+'/val',
                                               distribution=DISTRIBUTION_MODE, content_type='csv')
    remote_inputs = {'train': train_input, 'validation': val_input}
    # Launch asynchronously; completion is polled later in the notebook.
    xgb.fit(remote_inputs, wait=False)
    # Return the estimator object
    return xgb
```
### Kick off a model training job for each housing location
```
def save_data_locally(location, train, val, test):
    """Write the train/val/test arrays as CSV under data/<location>/<split>/.

    Each split goes to its own sub-directory so it can be uploaded as a
    separate SageMaker channel. Values are formatted with 2 decimal places.

    Parameters
    ----------
    location : str
        City identifier used in directory and file names.
    train, val, test : numpy.ndarray
        2-D arrays with the target in column 0.
    """
    # exist_ok=True makes the function safe to re-run: the original
    # os.makedirs calls raised FileExistsError if the directories existed.
    splits = {'train': train, 'val': val, 'test': test}
    for split_name, data in splits.items():
        split_dir = f'data/{location}/{split_name}'
        os.makedirs(split_dir, exist_ok=True)
        np.savetxt(f'{split_dir}/{location}_{split_name}.csv',
                   data, delimiter=',', fmt='%.2f')
import shutil
import os
# Generate data and launch one asynchronous training job per city.
estimators = []
# start from a clean local data directory
shutil.rmtree('data', ignore_errors=True)
for loc in LOCATIONS[:PARALLEL_TRAINING_JOBS]:
    _houses = gen_houses(NUM_HOUSES_PER_LOCATION)
    _train, _val, _test = split_data(_houses)
    save_data_locally(loc, _train, _val, _test)
    estimator = launch_training_job(loc)
    estimators.append(estimator)
print()
print(f'{len(estimators)} training jobs launched: {[x.latest_training_job.job_name for x in estimators]}')
```
### Wait for all model training to finish
```
def wait_for_training_job_to_complete(estimator):
    """Block until *estimator*'s latest training job leaves 'InProgress'.

    Polls the job status every 45 seconds and prints progress updates.
    """
    job = estimator.latest_training_job.job_name
    print(f'Waiting for job: {job}')
    status = estimator.latest_training_job.describe()['TrainingJobStatus']
    while status == 'InProgress':
        time.sleep(45)  # throttle describe() polling
        status = estimator.latest_training_job.describe()['TrainingJobStatus']
        if status == 'InProgress':
            print(f'{job} job status: {status}')
    print(f'DONE. Status for {job} is {status}\n')

# Wait for every launched job before building the multi-model endpoint.
for est in estimators:
    wait_for_training_job_to_complete(est)
```
# Create the multi-model endpoint with the SageMaker SDK
### Create a SageMaker Model from one of the Estimators
```
# Use the first estimator to create a SageMaker Model; all jobs used the
# same container image, so any one of them would do.
estimator = estimators[0]
model = estimator.create_model(role=role, image_uri=XGBOOST_IMAGE)
```
### Create the Amazon SageMaker MultiDataModel entity
We create the multi-model endpoint using the [```MultiDataModel```](https://sagemaker.readthedocs.io/en/stable/api/inference/multi_data_model.html) class.
You can create a MultiDataModel by directly passing in a `sagemaker.model.Model` object - in which case, the Endpoint will inherit information about the image to use, as well as any environmental variables, network isolation, etc., once the MultiDataModel is deployed.
In addition, a MultiDataModel can also be created without explicitly passing a `sagemaker.model.Model` object. Please refer to the documentation for additional details.
```
from sagemaker.multidatamodel import MultiDataModel
# This is where our MME will read models from on S3.
model_data_prefix = f's3://{BUCKET}/{DATA_PREFIX}/{MULTI_MODEL_ARTIFACTS}/'
mme = MultiDataModel(name=MODEL_NAME,
                     model_data_prefix=model_data_prefix,
                     model=model,  # passing our model - passes container image needed for the endpoint
                     sagemaker_session=sagemaker_session)
```
# Deploy the Multi Model Endpoint
You need to consider the appropriate instance type and number of instances for the projected prediction workload across all the models you plan to host behind your multi-model endpoint. The number and size of the individual models will also drive memory requirements.
```
# Deploy the multi-model endpoint; the instance(s) lazily download and load
# individual models from model_data_prefix on first invocation.
predictor = mme.deploy(initial_instance_count=1,
                       instance_type=ENDPOINT_INSTANCE_TYPE,
                       endpoint_name=ENDPOINT_NAME)
```
### Our endpoint has launched! Let's look at what models are available to the endpoint!
By 'available', what we mean is, what model artifacts are currently stored under the S3 prefix we defined when setting up the `MultiDataModel` above i.e. `model_data_prefix`.
Currently, since we have no artifacts (i.e. `tar.gz` files) stored under our defined S3 prefix, our endpoint, will have no models 'available' to serve inference requests.
We will demonstrate how to make models 'available' to our endpoint below.
```
# No models visible! Nothing has been copied under model_data_prefix yet.
list(mme.list_models())
```
### Lets deploy model artifacts to be found by the endpoint
We are now using the `.add_model()` method of the `MultiDataModel` to copy over our model artifacts from where they were initially stored, during training, to where our endpoint will source model artifacts for inference requests.
`model_data_source` refers to the location of our model artifact (i.e. where it was deposited on S3 after training completed)
`model_data_path` is the **relative** path to the S3 prefix we specified above (i.e. `model_data_prefix`) where our endpoint will source models for inference requests.
Since this is a **relative** path, we can simply pass the name of what we wish to call the model artifact at inference time (i.e. `Chicago_IL.tar.gz`)
### Dynamically deploying additional models
It is also important to note, that we can always use the `.add_model()` method, as shown below, to dynamically deploy more models to the endpoint, to serve up inference requests as needed.
```
# Copy each training job's artifact under the MME prefix. The artifact path
# looks like .../model_artifacts/<location>/<job-name>/output/model.tar.gz,
# so the 4th-from-last path segment is the location name.
for est in estimators:
    artifact_path = est.latest_training_job.describe()['ModelArtifacts']['S3ModelArtifacts']
    model_name = artifact_path.split('/')[-4]+'.tar.gz'
    # This is copying over the model artifact to the S3 location for the MME.
    mme.add_model(model_data_source=artifact_path, model_data_path=model_name)
```
## We have added the 4 model artifacts from our training jobs!
We can see that the S3 prefix we specified when setting up `MultiDataModel` now has 4 model artifacts. As such, the endpoint can now serve up inference requests for these models.
```
# The endpoint can now serve the copied model artifacts.
list(mme.list_models())
```
# Get predictions from the endpoint
Recall that ```mme.deploy()``` returns a [RealTimePredictor](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/predictor.py#L35) that we saved in a variable called ```predictor```.
We will use ```predictor``` to submit requests to the endpoint.
XGBoost supports ```text/csv``` for the content type and accept type. For more information on XGBoost Input/Output Interface, please see [here.](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html#InputOutput-XGBoost)
Since the default RealTimePredictor does not have a serializer or deserializer set for requests, we will also set these.
This will allow us to submit a python list for inference, and get back a float response.
```
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import JSONDeserializer
# Send python lists as CSV and parse the JSON response back into python types.
predictor.serializer = CSVSerializer()
predictor.deserializer = JSONDeserializer()
#predictor.content_type =predictor.content_type , removed as mentioned https://github.com/aws/sagemaker-python-sdk/blob/e8d16f8bc4c570f763f1129afc46ba3e0b98cdad/src/sagemaker/predictor.py#L82
#predictor.accept = "text/csv" # removed also : https://github.com/aws/sagemaker-python-sdk/blob/e8d16f8bc4c570f763f1129afc46ba3e0b98cdad/src/sagemaker/predictor.py#L83
```
### Invoking models on a multi-model endpoint
Notice the higher latencies on the first invocation of any given model. This is due to the time it takes SageMaker to download the model to the Endpoint instance and then load the model into the inference container. Subsequent invocations of the same model take advantage of the model already being loaded into the inference container.
```
# Time several invocations. The first call for a given model is slower
# because the endpoint must first download and load that model; repeat
# calls hit the already-loaded model.
for target in ['Chicago_IL.tar.gz', 'Chicago_IL.tar.gz',
               'Houston_TX.tar.gz', 'Houston_TX.tar.gz']:
    start_time = time.time()
    predicted_value = predictor.predict(data=gen_random_house()[1:], target_model=target)
    duration = time.time() - start_time
    print('${:,.2f}, took {:,d} ms\n'.format(predicted_value[0], int(duration * 1000)))
```
### Updating a model
To update a model, you would follow the same approach as above and add it as a new model. For example, if you have retrained the `NewYork_NY.tar.gz` model and wanted to start invoking it, you would upload the updated model artifacts behind the S3 prefix with a new name such as `NewYork_NY_v2.tar.gz`, and then change the `target_model` field to invoke `NewYork_NY_v2.tar.gz` instead of `NewYork_NY.tar.gz`. You do not want to overwrite the model artifacts in Amazon S3, because the old version of the model might still be loaded in the containers or on the storage volume of the instances on the endpoint. Invocations to the new model could then invoke the old version of the model.
Alternatively, you could stop the endpoint and re-deploy a fresh set of models.
## Using Boto APIs to invoke the endpoint
While developing interactively within a Jupyter notebook, since `.deploy()` returns a `RealTimePredictor` it is a more seamless experience to start invoking your endpoint using the SageMaker SDK. You have more fine grained control over the serialization and deserialization protocols to shape your request and response payloads to/from the endpoint.
This is great for iterative experimentation within a notebook. Furthermore, should you have an application that has access to the SageMaker SDK, you can always import `RealTimePredictor` and attach it to an existing endpoint - this allows you to stick to using the high level SDK if preferable.
Additional documentation on `RealTimePredictor` can be found [here.](https://sagemaker.readthedocs.io/en/stable/api/inference/predictors.html?highlight=RealTimePredictor#sagemaker.predictor.RealTimePredictor)
The lower level Boto3 SDK may be preferable if you are attempting to invoke the endpoint as a part of a broader architecture.
Imagine an API gateway frontend that uses a Lambda Proxy in order to transform request payloads before hitting a SageMaker Endpoint - in this example, Lambda does not have access to the SageMaker Python SDK, and as such, Boto3 can still allow you to interact with your endpoint and serve inference requests.
Boto3 allows for quick injection of ML intelligence via SageMaker Endpoints into existing applications with minimal/no refactoring to existing code.
Boto3 will submit your requests as a binary payload, while still allowing you to supply your desired `Content-Type` and `Accept` headers with serialization being handled by the inference container in the SageMaker Endpoint.
Additional documentation on `.invoke_endpoint()` can be found [here.](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker-runtime.html)
```
import boto3
import json
# Low-level runtime client; usable where the SageMaker SDK is unavailable.
runtime_sm_client = boto3.client(service_name='sagemaker-runtime')

def predict_one_house_value(features, model_name):
    """Invoke the MME via boto3 for one house and print the predicted price.

    features: list of feature values (the record without its PRICE column).
    model_name: relative artifact name under the MME prefix, e.g. 'Chicago_IL.tar.gz'.
    """
    print(f'Using model {model_name} to predict price of this house: {features}')
    # Notice how we alter the list into a string as the payload
    body = ','.join(map(str, features)) + '\n'
    start_time = time.time()
    response = runtime_sm_client.invoke_endpoint(
        EndpointName=ENDPOINT_NAME,
        ContentType='text/csv',
        TargetModel=model_name,
        Body=body)
    # response body is a JSON list with a single prediction
    predicted_value = json.loads(response['Body'].read())[0]
    duration = time.time() - start_time
    print('${:,.2f}, took {:,d} ms\n'.format(predicted_value, int(duration * 1000)))

predict_one_house_value(gen_random_house()[1:], 'Chicago_IL.tar.gz')
```
## Clean up
Here, to be sure we are not billed for endpoints we are no longer using, we clean up.
```
# Delete the endpoint (stops billing) and the associated model resource.
predictor.delete_endpoint()
predictor.delete_model()
```
| github_jupyter |
# Continuous Control
---
In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program.
### 1. Start the Environment
We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
```
from unityagents import UnityEnvironment
import numpy as np
```
Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.
- **Mac**: `"path/to/Reacher.app"`
- **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"`
- **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"`
- **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"`
- **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"`
- **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"`
- **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"`
For instance, if you are using a Mac, then you downloaded `Reacher.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:
```
env = UnityEnvironment(file_name="Reacher.app")
```
```
# Path to the downloaded Reacher build; adjust for your OS / install location.
env = UnityEnvironment(file_name='/home/payal/Documents/deep-reinforcement-learning/p2-continuous-control//Reacher_Linux_NoVis/Reacher.x86_64')
```
Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
```
# get the default brain (the agent controller commanded from Python)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```
### 2. Examine the State and Action Spaces
In this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible.
The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`.
The simulation contains `20` identical agents, each with its own copy of the environment.
Run the code cell below to print some information about the environment.
```
# Inspect the environment: agent count, action size, and state size are
# reused later when constructing the Agent.
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
```
### 3. Take Random Actions in the Environment
In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment.
Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!
```
# Drive all agents with random actions for one episode (sanity check).
env_info = env.reset(train_mode=False)[brain_name]      # reset the environment
states = env_info.vector_observations                   # get the current state (for each agent)
scores = np.zeros(num_agents)                           # initialize the score (for each agent)
while True:
    actions = np.random.randn(num_agents, action_size)  # select an action (for each agent)
    actions = np.clip(actions, -1, 1)                   # all actions between -1 and 1
    env_info = env.step(actions)[brain_name]            # send all actions to the environment
    next_states = env_info.vector_observations          # get next state (for each agent)
    rewards = env_info.rewards                          # get reward (for each agent)
    dones = env_info.local_done                         # see if episode finished
    scores += env_info.rewards                          # update the score (for each agent)
    states = next_states                                # roll over states to next time step
    if np.any(dones):                                   # exit loop if episode finished
        break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
```
When finished, you can close the environment.
```
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Return the symmetric uniform init range (-1/sqrt(f), 1/sqrt(f)) for *layer*.

    NOTE(review): f is taken from layer.weight.data.size()[0], which for
    nn.Linear is the number of OUTPUT units; the DDPG paper scales by the
    number of input units (size()[1]). Kept as-is to preserve behavior --
    confirm before changing, as it alters initialization.
    """
    fan_in = layer.weight.data.size()[0]
    limit = 1.0 / np.sqrt(fan_in)
    return (-limit, limit)
class Actor(nn.Module):
    """Actor (Policy) Model: deterministic policy mapping states -> actions."""

    def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=128):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights: hidden layers per hidden_init, output layer small."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        # small final-layer init keeps initial actions near zero
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions in [-1, 1]."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # FIX: torch.tanh replaces F.tanh, which is deprecated and removed
        # in newer PyTorch releases; the values are identical.
        return torch.tanh(self.fc3(x))
class Critic(nn.Module):
    """Critic (Value) Model: maps (state, action) pairs -> Q-values."""

    def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units=64):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        # action is injected after the first state-only layer (DDPG convention)
        self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights: hidden layers per hidden_init, output layer small."""
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values.

        BUG FIX: the original began with `state = nn.BatchNorm1d(state)`, which
        constructs a BatchNorm *module* with a tensor as its num_features
        argument and then feeds that module into the linear layer -- a runtime
        type error, not normalization. The line is removed; if batch
        normalization is desired, register nn.BatchNorm1d(state_size) in
        __init__ and apply that module here instead.
        """
        xs = F.relu(self.fcs1(state))
        x = torch.cat((xs, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
import numpy as np
import random
import time
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6)   # replay buffer size
BATCH_SIZE = 1024        # minibatch size
GAMMA = 0.99             # discount factor
TAU = 1e-3               # soft-update interpolation factor for target networks
LR_ACTOR = 1e-3          # actor learning rate
LR_CRITIC = 1e-3         # critic learning rate
WEIGHT_DECAY = 0.0000    # critic L2 weight decay
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, random_seed):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            random_seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        # Actor networks (local + target) with Adam optimizer
        self.actor_local = Actor(state_size, action_size, random_seed).to(device)
        self.actor_target = Actor(state_size, action_size, random_seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
        # Critic networks (local + target) with Adam optimizer
        self.critic_local = Critic(state_size, action_size, random_seed).to(device)
        self.critic_target = Critic(state_size, action_size, random_seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
        # Ornstein-Uhlenbeck exploration noise process
        self.noise = OUNoise(action_size, random_seed)
        # Replay memory shared across all parallel env agents
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)

    def step(self, state, action, reward, next_state, done, time_step):
        """Store one experience per parallel agent, then learn periodically.

        Learning runs only every 20 environment steps, but performs 10
        gradient updates when it does.
        """
        for s, a, r, ns, d in zip(state, action, reward, next_state, done):
            self.memory.add(s, a, r, ns, d)
        if len(self.memory) > BATCH_SIZE and time_step % 20 == 0:
            for _ in range(10):
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().to(device)
        self.actor_local.eval()  # inference mode for deterministic policy output
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            action += self.noise.sample()  # exploration noise added outside the graph
        return action

    def reset(self):
        """Reset the exploration-noise process (call at episode start)."""
        self.noise.reset()

    def learn(self, experiences, gamma):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # ---- update critic: TD target from target networks ----
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # bootstrap only for non-terminal transitions (dones masks the tail)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        # gradient clipping stabilizes critic training
        torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
        self.critic_optimizer.step()
        # ---- update actor: maximize expected Q of its own actions ----
        actions_pred = self.actor_local(states)
        actor_loss = -self.critic_local(states, actions_pred).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ---- slowly track local weights into the target networks ----
        self.soft_update(self.critic_local, self.critic_target, TAU)
        self.soft_update(self.actor_local, self.actor_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
        """Initialize parameters and noise process.
        Params
        ======
            size (int): dimension of the noise vector (= action size)
            seed (int): random seed
            mu (float): long-running mean the process reverts to
            theta (float): mean-reversion rate
            sigma (float): scale of the random perturbation
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Update internal state and return it as a noise sample.

        BUG FIX: the perturbation now uses zero-mean Gaussian draws
        (random.gauss), matching the OU process definition. The original
        used random.random(), i.e. uniform noise on [0, 1) with mean 0.5,
        which biased every sampled action upward. random.gauss keeps the
        stdlib RNG, so random.seed(seed) still controls reproducibility.
        """
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.array([random.gauss(0.0, 1.0) for _ in range(len(x))])
        self.state = x + dx
        return self.state
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)  # old experiences fall off the left
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Randomly sample a batch of experiences and return them as float tensors."""
        batch = random.sample(self.memory, k=self.batch_size)

        def _to_tensor(values, dtype=None):
            # stack the per-experience fields row-wise and move to the device
            stacked = np.vstack(values)
            if dtype is not None:
                stacked = stacked.astype(dtype)
            return torch.from_numpy(stacked).float().to(device)

        states = _to_tensor([e.state for e in batch if e is not None])
        actions = _to_tensor([e.action for e in batch if e is not None])
        rewards = _to_tensor([e.reward for e in batch if e is not None])
        next_states = _to_tensor([e.next_state for e in batch if e is not None])
        # done flags go through uint8 before the float conversion, as before
        dones = _to_tensor([e.done for e in batch if e is not None], dtype=np.uint8)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
# Single shared agent; sizes come from the environment inspection above.
agent = Agent(state_size=state_size, action_size=action_size, random_seed=0)
def ddpg(n_episodes=2000, max_steps=1000):
    """Run DDPG training until solved (100-episode average score > 30).

    Params
    ======
        n_episodes (int): maximum number of episodes
        max_steps (int): maximum environment steps per episode
    Returns the list of per-episode scores (mean over all parallel agents).
    Saves actor/critic weights to checkpoint_*.pth when solved.
    """
    scores_hundred_queue = deque(maxlen=100)  # rolling window for the 100-episode average
    scores = []
    for i_episode in range(1, n_episodes+1):
        then = time.time()
        average_score = 0
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        scores_agents = np.zeros(num_agents)
        agent.reset()  # reset exploration noise at each episode start
        score = 0
        for step in range(max_steps):
            actions = agent.act(states)
            env_info = env.step(actions)[brain_name]
            next_states = env_info.vector_observations
            rewards = env_info.rewards
            dones = env_info.local_done
            agent.step(states, actions, rewards, next_states, dones, step)
            states = next_states
            scores_agents += rewards
            if np.any(dones):
                break
        # episode score = mean over the parallel agents
        score = np.mean(scores_agents)
        scores_hundred_queue.append(score)
        average_score = np.mean(scores_hundred_queue)
        scores.append(score)
        print("Episode: ", i_episode)
        print("Min Score: {:.2f} Max Score: {:.2f}".format(scores_agents.min(), scores_agents.max()))
        print("Score: {:.2f}".format(score))
        print("AvgScore: {:.2f}".format(average_score))
        now = time.time()
        print("Time: {:.2f} secs\n".format(now - then))
        if average_score > 30:
            # environment solved: persist weights and stop
            torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
            torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
            break
    return scores
# Train and plot the per-episode score curve.
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
```
| github_jupyter |
If not explicitly mentioned otherwise we assume:
- RCP2.6 scenario or the lowest ppm concentration reported (stabilized around 400-420)
- Linear phase-out of fossil fuels from model start time (2000-2015) by 2100
- BAU scenario would lead to RCP6 or higher
- as it is widely accepted that in order to obtain RCP2.6, emissions must at least cease or turn into removals in the geological near-term (throughout this century), therefore whenever the carbon price is given in terms of percentage reduction from current levels, a linear 100% reduction is assumed from model start time (2000-2015) by 2100
- if ranges are reported, the mean is taken
- if the model reports price in dollar per ton of carbon, it is converted to dollar per ton of carbon dioxide
```
import pandas as pd, numpy as np, matplotlib.pyplot as plt, matplotlib as mpl
%matplotlib inline
mpl.style.use('classic')
# Digitized carbon-price trajectories (column 0 = year, column 1 = price),
# one CSV per study/model; commented-out studies are excluded from the analysis.
d=[]
#d.append(pd.read_csv('carbon/alberth_hope2006.csv',header=None))
#d.append(pd.read_csv('carbon/alberth_hope2006_2.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2a.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2b.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2c.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014a.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014b.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014c.csv',header=None))
d.append(pd.read_csv('carbon/cai2015.csv',header=None))
d.append(pd.read_csv('carbon/chen2005.csv',header=None))
d.append(pd.read_csv('carbon/edmonds_GCAM1994.csv',header=None))
d.append(pd.read_csv('carbon/kriegler2015_2.csv',header=None))
#d.append(pd.read_csv('carbon/luderer_REMIND2015.csv',header=None))
d.append(pd.read_csv('carbon/manne_richels_MERGE2005.csv',header=None))
d.append(pd.read_csv('carbon/paltsev2005.csv',header=None))
d.append(pd.read_csv('carbon/russ_POLES2012.csv',header=None))
d.append(pd.read_csv('carbon/wilkerson2015.csv',header=None))
from scipy.interpolate import interp1d
# Build one interpolator per study: kd[z] maps rounded year -> price and
# fd[z] interpolates linearly between the digitized points.
kd=[]
fd=[]
for z in range(len(d)):
    kd.append({})
    for i in range(len(d[z][0])):
        # ~ is bitwise NOT, valid here because isnan returns a numpy bool
        if ~np.isnan(d[z][0][i]):
            kd[z][np.round(d[z][0][i],0)]=d[z][1][i]
    fd.append(interp1d(sorted(kd[z].keys()),[kd[z][j] for j in sorted(kd[z].keys())]))
# Plot every interpolated trajectory over its digitized year range.
for z in range(len(d)):
    #plt.scatter(d[z][0],d[z][1])
    years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
    plt.plot(years,fd[z](years))
# Legend labels for the 15 loaded series, in the same order as the list `d`.
labels=['Bauer, Hilaire et al.\n2012 | REMIND-R',\
'Luderer, Bosetti et al.\n2011 | IMACLIM-R',\
'Luderer, Bosetti et al.\n2011 | REMIND-R',\
'Luderer, Bosetti et al.\n2011 | WITCH',\
'Bosetti, Marangoni et al.\n2015 | GCAM',\
'Bosetti, Marangoni et al.\n2015 | MARKAL US',\
'Bosetti, Marangoni et al.\n2015 | WITCH',\
'Cai, Newth et al.\n2015 | GTEM-C',\
'Chen, 2005\nMARKAL-MACRO',\
'Edmonds, Wise, MacCracken\n1994 | GCAM',\
'Kriegler, Petermann, et al.\n2015 | multiple',\
'Manne, Richels\n2005 | MERGE',\
'Paltsev, Reilly et al.\n2005 | MIT EPPA',\
'Russ, Ciscar et al.\n2009 | POLES',\
'Wilkerson, Leibowicz et al.\n2015 | multiple'\
]
# co2[z] == 1 when model z already reports $/tCO2; 0 means the model reports
# $/tC and is later converted with the molar-mass ratio 44/12.
co2=[1,1,1,1,0,0,0,1,0,0,1,0,0,0,1]
# Spot-check one series (z=14, Wilkerson et al.): raw digitized points
# versus the interpolated curve.
z=14
plt.scatter(d[z][0],d[z][1])
years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
plt.plot(years,fd[z](years))
def plotter(ax,x,y,c,l,z=2,zz=2,step=2,w=-50,w2=30):
    """Fit a logistic curve to (x, y), plot it on `ax`, and annotate it.

    Args:
        ax: matplotlib axes to draw on.
        x, y: data arrays (x typically years, y the quantity to fit).
        c: color for the scattered data points.
        l: text label for the curve annotation.
        z: fractional position (in fifths of len(x)) of the statistics
           annotation; pass z<0 to suppress it.
        zz: fractional position of the label annotation.
        step: stride used when scattering the raw data points.
        w, w2: annotation text offsets in points.

    NOTE(review): relies on module-level names defined elsewhere in the
    notebook: `optimize` (scipy.optimize), `errfunc`, `logist`, and `lw`
    — TODO confirm they are defined before this is called.
    """
    yrs=range(x[0]-40,x[len(x)-1]+10)
    maxi=[0,0]
    maxv=-100
    # Try a few initial midpoint guesses and keep the one with maximum
    # R-squared. (`i` is kept fixed at 0 here; only `k` is searched.)
    i=0
    for k in range(1,5):
        # BUGFIX: `len(x)*k/5` is float division on Python 3 and cannot be
        # used as an index; use floor division (identical result on Py2).
        # NOTE(review): this trial p0 has 3 parameters while the final fit
        # below uses 4 — presumably intentional, verify against `errfunc`.
        p0 = [1., 1., x[len(x)*k//5]]
        fit2 = optimize.leastsq(errfunc,p0,args=(x,y),full_output=True)
        ss_err=(fit2[2]['fvec']**2).sum()
        ss_tot=((y-y.mean())**2).sum()
        rsquared=1-(ss_err/ss_tot)
        if rsquared>maxv:
            maxi=[i,k]
            maxv=rsquared
    i=maxi[0]
    k=maxi[1]
    # Final 4-parameter fit seeded with the best midpoint found above.
    p0 = [1., 1., x[len(x)*k//5], -1+i*0.5]
    fit2 = optimize.leastsq(errfunc,p0,args=(x,y),full_output=True)
    ss_err=(fit2[2]['fvec']**2).sum()
    ss_tot=((y-y.mean())**2).sum()
    rsquared=1-(ss_err/ss_tot)
    ax.scatter(x[::step],y[::step],lw*3,color=c)
    #ax.plot(yrs,logist(fit2[0],yrs),color="#006d2c",lw=lw)
    ax.plot(yrs,logist(fit2[0],yrs),color="#444444",lw=lw)
    #ax.plot(yrs,logist(fit2[0],yrs),color=c,lw=1)
    # Width of the transition: years needed to go from 10% to 90% of the
    # fitted logistic (perc = 0.1).
    yk=logist([fit2[0][0],fit2[0][1],fit2[0][2],fit2[0][3]],range(3000))
    mint=0
    maxt=3000
    perc=0.1
    for i in range(3000):
        if yk[i]<perc: mint=i
        if yk[i]<1-perc: maxt=i
    if z>-1:
        # Statistics annotation (R^2, fit parameters, transition width).
        coord=len(x)*z//5
        ax.annotate('$R^2 = '+str(np.round(rsquared,2))+'$\n'+\
            '$\\alpha = '+str(np.round(fit2[0][0],2))+'$\n'+\
            '$\\beta = '+str(np.round(fit2[0][1],2))+'$\n'+\
            '$\\Delta t = '+str(int(maxt-mint))+'$', xy=(yrs[coord], logist(fit2[0],yrs)[coord]),\
            xycoords='data',
            xytext=(w, w2), textcoords='offset points', color="#444444",
            arrowprops=dict(arrowstyle="->",color='#444444'))
    # Curve label annotation.
    coord=len(x)*zz//5
    ax.annotate(l, xy=(yrs[coord], logist(fit2[0],yrs)[coord]),\
        xycoords='data',
        xytext=(w, w2), textcoords='offset points',
        arrowprops=dict(arrowstyle="->"))
# Figure 1: all 15 series converted to $/tCO2 on a common 2000-2100 axis.
# NOTE(review): subplot_kw 'axisbg' is the pre-2.x matplotlib spelling of
# 'facecolor'; this cell needs an old matplotlib to run as written.
fig, ax = plt.subplots(1,1,subplot_kw=dict(axisbg='#EEEEEE',axisbelow=True),figsize=(10,5))
lw=2
colors=["#756bb1","#d95f0e","#444444"]
ax.grid(color='white', linestyle='solid')
ax.set_xlabel('Years')
ax.set_ylabel('Carbon tax $[\$/tonCO_2]$')
ax.set_xlim([2000,2100])
ax.set_ylim([0,5000])
#ax.set_yscale('log')
ax.set_title('Carbon price estimations from various IAM models',size=13,y=1.04)
# loc[z]: year at which label z is anchored; lz[z]: label offset in points.
loc=[2088,2083,2084,2080,2031,2047,2043,2088,2015,2072,2050,2075,2095,2020,2062]
lz=[(-70, 20),(-70, 20),(-20, 10),(-40, 20),(-100, 40),(-110, 20),(-130, 20),(-15, 15),\
(-70, 20),(-105, 20),(-80, 20),(-60, 12),(-120, -5),(-70, 50),(-30, 7)]
for z in range(len(d))[:15]:
    #ax.scatter(d[z][0],d[z][1])
    years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
    # Convert $/tC to $/tCO2 where needed (44/12 = mass CO2 per mass C).
    if (co2[z]==1):k=1
    else: k=44.0/12.0
    ax.plot(years,fd[z](years)*k,lw=lw,color=colors[z%3])
    ax.annotate(labels[z]+str(z), xy=(loc[z],fd[z]([loc[z]])*k),\
        xycoords='data',
        xytext=lz[z], textcoords='offset points',fontsize=9, color=colors[z%3],
        arrowprops=dict(arrowstyle="->",color=colors[z%3]))
#plt.savefig('ces9.png',bbox_inches = 'tight', pad_inches = 0.1, dpi=150)
plt.show()
# Figure 2: marginal abatement cost version of the plot. Series 0, 9 and 14
# are skipped; the remaining ones are grouped by line weight/color to
# highlight selected model runs.
# NOTE(review): 'axisbg' requires a pre-2.x matplotlib (see figure above).
fig, ax = plt.subplots(1,1,subplot_kw=dict(axisbg='#EEEEEE',axisbelow=True),figsize=(10,5))
lw=2
colors=["#756bb1","#d95f0e","#444444"]
ax.grid(color='white', linestyle='solid')
ax.set_xlabel('Years')
ax.set_ylabel('$MAC$ $[\$/tonCO_2]$')
ax.set_xlim([2000,2100])
ax.set_ylim([0,5000])
#ax.set_yscale('log')
ax.set_title(u'Marginal abatement cost $(MAC)$ estimations from various IAM models',size=13,y=1.04)
# loc[z]: label anchor year; lz[z]: label offset in points.
loc=[2088,2070,2084,2070,2031,2047,2043,2088,2015,2072,2065,2075,2095,2019,2062]
lz=[(-60, 20),(-75, 20),(-20, 10),(-70, 20),(-100, 40),(-110, 20),(-130, 20),(-15, 15),\
(-70, 20),(-90, 20),(-70, 20),(-70, 12),(-120, -5),(-60, 50),(-30, 7)]
for z in range(len(d))[:15]:
    #ax.scatter(d[z][0],d[z][1])
    if z not in {0,9,14}:
        years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
        # Convert $/tC to $/tCO2 where needed (44/12 = mass CO2 per mass C).
        if (co2[z]==1):k=1
        else: k=44.0/12.0
        # Emphasized series (thick, dark) vs. two de-emphasized groups.
        if z in {3,6,7,12}:
            lw=3
            c=colors[2]
        elif z in {0,1,2,5}:
            lw=1
            c=colors[1]
        else:
            lw=1
            c=colors[0]
        ax.plot(years,fd[z](years)*k,lw=lw,color=c)
        ax.annotate(labels[z], xy=(loc[z],fd[z]([loc[z]])*k),\
            xycoords='data',
            xytext=lz[z], textcoords='offset points',fontsize=9, color=c,
            arrowprops=dict(arrowstyle="->",color=c))
plt.savefig('ces9b.png',bbox_inches = 'tight', pad_inches = 0.1, dpi=150)
plt.show()
# List the plotted series labels. BUGFIX: the original used the Python-2-only
# `print labels[z]` statement; the call form below works on both Python 2 and 3.
for z in range(len(d))[:15]:
    print(labels[z])
```
| github_jupyter |
```
import xarray as xr
import xroms
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cmocean.cm as cmo
import cartopy
```
# How to select data
The [load_data](load_data.ipynb) notebook demonstrates how to load in data — but how do you select out parts of it?
### Load in data
More information at in [load_data notebook](load_data.ipynb)
```
# Open the latest TXLA forecast from the TAMU THREDDS server lazily; chunking
# by single time steps means nothing is downloaded until values are used.
loc = 'http://barataria.tamu.edu:8080/thredds/dodsC/forecast_latest/txla2_his_f_latest.nc'
chunks = {'ocean_time':1}
ds = xr.open_dataset(loc, chunks=chunks)
# set up grid metrics and derived coordinates (e.g. z_rho) via xroms
ds, grid = xroms.roms_dataset(ds)
```
## Select
### Slices by index or keyword
#### Surface layer slice
The surface in ROMS is given by the last index in the vertical dimension. The easiest way to access this is by indexing into `s_rho`. While normally it is better to access coordinates through keywords to be human-readable, it's not easy to tell what value of `s_rho` gives the surface. In this instance, it's easier to just go by index.
```
ds.salt.isel(s_rho=-1)
```
#### x/y index slice
For a curvilinear ROMS grid, selecting by the dimensions `xi_rho` or `eta_rho` (or for whichever is the relevant grid) is not very meaningful because they are given by index. Thus the following is possible to get a slice along the index, but it cannot be used to find a slice based on the lon/lat values.
```
ds.temp.sel(xi_rho=20)
```
#### Single time
Find the forecast model output available that is closest to now. Note that the `method` keyword argument is not necessary if the desired date/time is exactly a model output time.
```
# Surface salinity at the model output time closest to "now".
now = pd.Timestamp.today()
ds.salt.isel(s_rho=-1).sel(ocean_time=now, method='nearest')
```
#### Range of time
```
ds.salt.sel(ocean_time=slice(now,now+pd.Timedelta('2 days')))
```
### Calculate slice
#### Cross-section along a longitude value
Because the example grid is curvilinear, a slice along a grid dimension is not the same as a slice along a longitude or latitude (or projected $x$/$y$) value. This needs to be calculated and we can use the `xisoslice` function to do this. The calculation is done lazily. We calculate only part of the slice, on the continental shelf. Renaming the subsetted dataset (below, as `dss`) is convenient because this variable can be used in place of `ds` for all related function calls to be consistent and only have to subset one time.
```
# want salinity along this constant longitude value
lon0 = -91.5
# This is the array we want projected onto the longitude value.
# Note that we are requesting multiple times at once.
dss = ds.isel(ocean_time=slice(0,10), eta_rho=slice(50,-1))
# Project the 3rd input (salt) onto the surface lon_rho == lon0 by
# interpolating along 'xi_rho'; the computation is lazy.
sl = xroms.xisoslice(dss.lon_rho, lon0, dss.salt, 'xi_rho')
sl
# First and last requested times, side by side.
fig, axes = plt.subplots(1, 2, figsize=(15,6))
sl.isel(ocean_time=0).plot(ax=axes[0])
sl.isel(ocean_time=-1).plot(ax=axes[1])
```
Better plot: use coordinates and one colorbar to compare.
```
# calculate z values (s_rho) along the same longitude slice
slz = xroms.xisoslice(dss.lon_rho, lon0, dss.z_rho, 'xi_rho')
# calculate latitude values (eta_rho) along the slice
sllat = xroms.xisoslice(dss.lon_rho, lon0, dss.lat_rho, 'xi_rho')
# assign these as coords so the plot can use physical axes (lat, z)
sl = sl.assign_coords(z=slz, lat=sllat)
# points that should be masked (land)
slmask = xroms.xisoslice(dss.lon_rho, lon0, dss.mask_rho, 'xi_rho')
# drop masked values
sl = sl.where(slmask==1, drop=True)
# find min and max of the slice itself (without values that should be masked)
# so both panels share one color scale
vmin = sl.min().values
vmax = sl.max().values
fig, axes = plt.subplots(1, 2, figsize=(15,6), sharey=True)
sl.isel(ocean_time=0).plot(x='lat', y='z', ax=axes[0], vmin=vmin, vmax=vmax, add_colorbar=False)
mappable = sl.isel(ocean_time=-1).plot(x='lat', y='z', ax=axes[1], vmin=vmin, vmax=vmax, add_colorbar=False)
fig.colorbar(ax=axes, mappable=mappable, orientation='horizontal').set_label('salt')
```
Verify performance of isoslice by comparing slice at surface with planview surface plot.
```
# Verify the isoslice: overlay its surface values (scattered dots along
# lon0) on a planview surface salinity map — colors should match.
vmin = dss.salt.min().values
vmax = dss.salt.max().values
fig, ax = plt.subplots(1, 1, figsize=(15,15))
ds.salt.isel(ocean_time=0, s_rho=-1).plot(ax=ax, x='lon_rho', y='lat_rho')
ax.scatter(lon0*np.ones_like(sl.lat[::10]), sl.lat[::10], c=sl.isel(ocean_time=0, s_rho=-1)[::10],
           s=100, vmin=vmin, vmax=vmax, zorder=10, edgecolor='k')
```
#### Variable at constant z value
```
# want temperature along this constant depth value
z0 = -10
# This is the array we want projected (single time step)
dss = ds.isel(ocean_time=0)
# Projecting 3rd input (temp) onto constant value z0 in iso_array (1st input)
sl = xroms.xisoslice(dss.z_rho, z0, dss.temp, 's_rho')
sl
sl.plot(cmap=cmo.thermal, x='lon_rho', y='lat_rho')
```
#### Variable at constant z depth, in time
```
# want temperature along this constant depth value, for all times at once
z0 = -10
# Projecting 3rd input (temp) onto constant value z0 in iso_array (1st input)
sl = xroms.xisoslice(ds.z_rho, z0, ds.temp, 's_rho')
sl
```
#### zeta at constant z depth, in time
... to verify that xisoslice does act in time across zeta.
```
# want zeta evaluated at this constant depth value, in time
z0 = -10
# zeta has no vertical dimension, so broadcast it over s_rho first; then
# project it onto constant z0 (which varies in time through zeta itself).
zeta_s_rho = ds.zeta.expand_dims({'s_rho': ds.s_rho}).transpose('ocean_time','s_rho',...)
sl = xroms.xisoslice(ds.z_rho, z0, zeta_s_rho, 's_rho')
sl.sel(eta_rho=30,xi_rho=20).plot()
```
#### Depth of isohaline surface
Calculate the depth of a specific isohaline.
Note that in this case there are a few wonky values, so we should filter them out or control the vmin/vmax values on the plot.
```
# want the depth of this constant salinity value
S0 = 33
# This is the array we want projected (single time step)
dss = ds.isel(ocean_time=0)
# Projecting 3rd input (z_rho) onto constant value S0 in iso_array (salt);
# vmin/vmax bound the plot because a few values are wonky.
sl = xroms.xisoslice(dss.salt, S0, dss.z_rho, 's_rho')
sl.plot(cmap=cmo.deep, x='lon_rho', y='lat_rho', vmin=-20, vmax=0, figsize=(10, 10))
```
### Select region
Select a boxed region by min/max lon and lat values.
```
# want model output only within the box defined by these lat/lon values
lon = np.array([-97, -96])
lat = np.array([28, 29])
# this boolean condition defines the region of interest; .compute() forces
# evaluation so it can be reused cheaply below
box = ((lon[0] < ds.lon_rho) & (ds.lon_rho < lon[1]) & (lat[0] < ds.lat_rho) & (ds.lat_rho < lat[1])).compute()
```
Plot the model output in the box at the surface
```
# Surface salinity at the first time, masked to the box region.
dss = ds.where(box).salt.isel(s_rho=-1, ocean_time=0)
dss.plot(x='lon_rho', y='lat_rho')
```
Can calculate a metric within the box:
```
dss.mean().values
```
### Find nearest model output in two dimensions
This matters for a curvilinear grid.
Can't use `sel` because it will only search in one coordinate for the nearest value and the coordinates are indices which are not necessarily geographic distance. Instead need to use a search for distance and use that for the `where` condition from the previous example.
Find the model output at the grid node nearest the point (lon0, lat0). You can create the projection to use for the distance calculation in `sel2d` and input it into the function, or you can let it choose a default for you.
```
# Nearest grid node (in projected distance) to the point (lon0, lat0).
lon0, lat0 = -96, 27
dl = 0.05
proj = cartopy.crs.LambertConformal(central_longitude=-98, central_latitude=30)
dssub = xroms.sel2d(ds, lon0, lat0, proj)
```
Or, if you instead want the indices of the nearest grid node returned, you can call `argsel2d`:
```
ix, iy = xroms.argsel2d(ds, lon0, lat0, proj)
```
Check this function, just to be sure:
```
# Sanity check: plot a small box around (lon0, lat0) and overlay the value
# returned by sel2d as a dot — its color should blend into the field.
box = (ds.lon_rho>lon0-dl) & (ds.lon_rho<lon0+dl) & (ds.lat_rho>lat0-dl) & (ds.lat_rho<lat0+dl)
dss = ds.where(box).salt.isel(ocean_time=0, s_rho=-1)
vmin = dss.min().values
vmax = dss.max().values
dss.plot(x='lon_rho', y='lat_rho')
plt.scatter(lon0, lat0, c=dssub.salt.isel(s_rho=-1, ocean_time=0), s=200, edgecolor='k', vmin=vmin, vmax=vmax)
plt.xlim(lon0-dl,lon0+dl)
plt.ylim(lat0-dl, lat0+dl)
```
Note that the `sel2d` function returned a time series since that was the input, and it worked fine. Getting the numbers takes time.
```
dssub.salt.isel(s_rho=-1, ocean_time=slice(0,5)).plot()
```
| github_jupyter |
## Viscous Inverse Design
This notebook demonstrates the use of gradients from viiflow for fully viscous inverse design.
It defines a target pressure distribution from one airfoil and, coming from another airfoil, tries to find the shape necessary to arrive at this target pressure.
It uses virtual displacements, which do not necessitate the recalculation of the panel operator.
Instead, it uses the same model used for the effect of boundary layer thickness onto the flow for modification of the airfoil shape.
The heart of this notebook is a Gauss-Newton iteration which solves for these virtual displacements.
Instead of trying to solve the pressure distribution exactly, the iteration solves a least-squares problem that joins the pressure difference with regularizing terms.
Fully viscous inverse design is not a straightforward problem. There are several ways an optimizer may *cheat*, for example
* The velocity is defined by the inviscid solution of the airfoil shape plus boundary layer thickness. An optimizer can therefore choose to reduce the thickness of the airfoil if for some reason a thick boundary layer leads to the target velocity distribution.
* Kinks in the desired velocity are, in the case below, due to laminar-turbulent transition. However, an optimizer can choose to model this kink by an actual kink in the airfoil.
To alleviate this, the pressure error is appended by a regularizing term that penalizes non-smooth displacements - simply by adding $ \frac{\mathrm{d}^2}{\mathrm{d}^2 s} \delta_{virtual}(s) $ at every point along the airfoil surface coordinate $s$ to the Least-Squares problem.
The parameters that increase/decrease the penalties were chosen ad hoc by trial and error.
In addition, the nodes very close to the stagnation point are not modified.
In addition, the residual $r$ of the viiflow solver itself is added to the Least-Squares problem and scaled such that at convergence its error is sufficiently low.
Every iteration then performs, for displacements $y$ and the viiflow variables $x$,
$$
y^{k+1} = y^k - \lambda {\Delta y}^k\\
x^{k+1} = x^k - \lambda {\Delta x}^k\\
{\Delta y}^k, {\Delta x}^k = \min_{\Delta y,\Delta x} \| F(y^k,x^k) - \frac{\partial F}{\partial y}(y^k,x^k) \Delta y - \frac{\partial F}{\partial x}(y^k,x^k) \Delta x\|^2\\
\|F(y,x)\|^2 = \gamma_{cp}^2\|ue(y)-ue_{target}\|^2 + \gamma_y^2\| \frac{\mathrm{d}^2}{\mathrm{d}^2 s} y \|^2 + \gamma_r^2 \|r(y,x)\|^2
$$
This may seem like a large problem, but the effort for solving the overdetermined least-squares problem grows largely with the degrees of freedom, not the amount of equations.
Below, this procedure is used to morph the S805 airfoil into the S825 airfoil. Even with the regularizing terms, little dips that enforce the laminar-turbulent transition can still be seen when zooming in.
While this solves for an airfoil shape of a specified pressure distribution, it is probably not a very smart idea to use this for actual design. A better idea is to use first an inviscid inverse design method, e.g. conformal mapping [1, 2], and remove the discrepancies using a fully viscid iteration.
The benefit of this Gauss-Newton approach is how straightforward additional constraints can be included, e.g. only fit the suction side from .1c onwards or fit multiple target distributions at multiple angles of attack.
```
import viiflow as vf
import viiflowtools.vf_tools as vft
import viiflowtools.vf_plots as vfp
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Analysis Settings
RE = 1e6       # Reynolds number
ncrit =5       # e^N transition criterion amplification exponent
Mach = 0.0
alpha = 4.0    # angle of attack [deg]
N = 300        # number of panels after re-paneling
# Read Airfoils: BASE is the starting shape, TARGET defines the target pressure
BASE = vft.repanel(vft.read_selig("S805.dat"),N,KAPFAC=2)
TARGET = vft.repanel(vft.read_selig("S825.dat"),N,KAPFAC=2)
# Solve target for our target cp (or more precisely edge velocity)
s = vf.setup(Re=RE,Ma=Mach,Ncrit=ncrit,Alpha=alpha)
# Internal iterations
s.Itermax = 100
# Set-up and initialize based on inviscid panel solution
[p,bl,x] = vf.init([TARGET],s)
res = None
grad = None
# Solve aerodynamic problem of target airfoil
vf.iter(x,bl,p,s,None,None)
# XT0: target x-coordinates; UT: target edge velocity distribution
XT0 = p.foils[0].X[0,:].copy()
UT = p.gamma_viscid[0:p.foils[0].N].copy()
# Set-up and initialize based on inviscid panel solution
[p,bl,x0] = vf.init([BASE],s)
res = None
grad = None
# Solve aerodynamic problem of current airfoil and save for later plotting
[x0,_,res,grad,_] = vf.iter(x0,bl,p,s,None,None)
# XC0: base-airfoil x-coordinates; UC: initial edge velocity distribution
XC0 = p.foils[0].X[0,:].copy()
UC = p.gamma_viscid[0:p.foils[0].N].copy()
# To interpolate from one grid to the next, suction and pressure side must have unique grid points.
# That is why below a grid is created where the pressure side is mirrored
# about the nose (leading-edge minimum), making x monotonic around the foil.
XT = XT0.copy()
XC = XC0.copy()
XT[np.argmin(XT0)+1::] = 2*XT0[np.argmin(XT0)]-XT0[np.argmin(XT0)+1::]
XC[np.argmin(XC0)+1::] = 2*XC0[np.argmin(XC0)]-XC0[np.argmin(XC0)+1::]
# Interpolate target pressure onto current airfoil grid (negated so the
# abscissa is increasing for np.interp)
UT = np.interp(-XC.flatten(),-XT.flatten(),np.asarray(UT[:,0]).flatten())
# Weighting factors for Gauss-Newton
facx = 500 # Penalty weight for smooth displacement (2nd difference)
fac_err = 5 # Weighting of cp error w.r.t. above penalties
fac_res = 1e4 # Weight of the viiflow residual, forces convergence of the flow solution
s.Gradients = True
NAERO = x.shape[0]  # number of aerodynamic unknowns
NVD = len(XC)       # number of virtual-displacement unknowns
# Set-up and initialize based on inviscid panel solution
[p,bl,x0] = vf.init([BASE],s)
res = None
grad = None
# Solve aerodynamic problem to convergence
[x,_,_,_,_] = vf.iter(x0,bl,p,s,None,None)
fprev = np.inf
# Find the transition location (sti) and do not change nodes near there
II = np.logical_and(np.fabs(XT-XT[bl[0].sti])>0.001,p.foils[0].X[0,:].ravel()>np.amin(p.foils[0].X[0,:].ravel()))
II[0]=False
II[NVD-1]=False
iter = 0
lam = 1.0
y = np.zeros(NVD)  # virtual displacements, the design unknowns
# Gauss-Newton iteration: jointly update virtual displacements y and flow
# variables x until both the step and the aerodynamic residual are small.
# NOTE(review): `iter` shadows the builtin of the same name within this cell.
while True:
    iter+=1
    # Evaluate (not solve: Itermax=0) the aerodynamic problem and gradients
    s.Itermax = 0
    s.Silent = True
    [_,_,res,grad,gradients] = vf.iter(x,bl,p,s,None,None,[y])
    # Pressure (edge velocity) mismatch term and its partial derivatives
    RESy = fac_err*(p.gamma_viscid[0:p.foils[0].N].A1-UT)
    dRESydy = fac_err*gradients.partial.gam_vd[0:NVD,:]
    dRESydx = fac_err*gradients.partial.gam_x[0:NVD,:]
    # (Alternative) penalty for thick boundary layer, currently disabled
    #REGdelta = bl[0].bl_fl.nodes.delta*facx
    #dREGdeltady = gradients.total.delta_vd[0:NVD,:]*facx
    # Penalty for smooth displacement: 2nd difference of y along the surface
    difforder = 2
    REGdelta = np.diff(y,difforder)*facx
    dREGdeltady = np.diff(np.eye(NVD),difforder,0)*facx
    dREGdeltadx = np.zeros((len(REGdelta),len(x)))
    # Stack all residual terms into one least-squares objective F
    F = np.r_[RESy,REGdelta,res*fac_res]
    fcurr = np.sqrt(F.T@F)
    y0 = y
    fprev = fcurr
    # Re-find transition (sti) and freeze nodes near it and at the ends
    II = np.logical_and(np.fabs(XT-XT[bl[0].sti])>0.001,p.foils[0].X[0,:].ravel()>np.amin(p.foils[0].X[0,:].ravel()))
    II[0]=False
    II[NVD-1]=False
    # Assemble the Jacobian of F w.r.t. (free displacements, flow variables)
    dFdy = np.r_[dRESydy,dREGdeltady,gradients.partial.res_vd*fac_res]
    dFdx = np.r_[dRESydx,dREGdeltadx,grad*fac_res]
    dF = np.c_[dFdy[:,II],dFdx]
    # Gauss-Newton step from the linear least-squares problem
    dX = -np.linalg.lstsq(dF,F,rcond=None)[0]
    dy = dX[0:np.sum(II)]
    dx = dX[np.sum(II)::]
    lam = 1
    # Aerodynamic residual norm (for reporting and convergence test)
    resaero = np.sqrt(np.matmul(res,res.T))
    # Ad-hoc Damping of the step length
    for k in range(len(dy)):
        lam = np.fmin(lam,0.005/abs(dy[k])) # Do not move virtual displacement more than 1mm
    for k in range(len(x)):
        lam = np.fmin(lam,.2/(abs(dx[k]/x[k]))) # Limit relative change of flow variables to 20%
    print("iter %u res p:%f resaero: %f dvd:%f lam:%f"%(iter, np.sqrt(np.matmul(F,F.T)), \
        resaero,np.sqrt(np.matmul(dy,dy.T)),lam))
    # Converged when both the displacement step and the flow residual are tiny
    if np.sqrt(np.matmul(dy,dy.T))<1e-4 and resaero<1e-4:
        print("Converged")
        break
    if iter>100:
        print("Not Converged (iteration)")
        break
    # Apply the damped update to the free displacement nodes and flow state
    j =0
    for k in np.argwhere(II):
        y[k] += lam*dy[j]
        j+=1
    x += lam*dx
# Plot results: cp-like curves (u_e^2 - 1) of initial, found and target
# distributions, then the corresponding geometries.
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
matplotlib.rcParams['figure.figsize'] = [11, 5.5]
fig,ax = plt.subplots(1,1)
ax.plot(p.foils[0].X[0,:],np.power(UC,2)-1,'-k')
ax.plot(p.foils[0].X[0,:],np.power(p.gamma_viscid[0:p.foils[0].N].A1,2)-1,'-',color=(0.6,0.6,0.6))
ax.plot(p.foils[0].X[0,:],np.power(UT,2)-1,'2k')
ax.legend(['Initial Pressure','Found Pressure','Target Pressure'])
xlim = ax.get_xlim()
fig,ax = plt.subplots(1,1)
lines = None
ax.plot(TARGET[0,:],TARGET[1,:],'2k')
lines = vfp.plot_geometry(ax,p,bl,lines)
ax.legend(['Target Airfoil','Initial Geometry','Found Geometry'])
ax.set_xlim(xlim)
```
[1] Selig, Michael S., and Mark D. Maughmer. *Generalized multipoint inverse airfoil design.* AIAA journal 30.11 (1992): 2618-2625.
[2] Drela, Mark. *XFOIL: An analysis and design system for low Reynolds number airfoils.* Low Reynolds number aerodynamics. Springer, Berlin, Heidelberg, 1989. 1-12.
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Solution Notebook
## Problem: Given two 16 bit numbers, n and m, and two indices i, j, insert m into n such that m starts at bit j and ends at bit i.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
## Constraints
* Can we assume j > i?
* Yes
* Can we assume i through j have enough space for m?
* Yes
* Can we assume the inputs are valid?
* No
* Can we assume this fits memory?
* Yes
## Test Cases
* None as an input -> Exception
* Negative index for i or j -> Exception
* General case
<pre>
i = 2, j = 6
j i
n = 0000 0100 0011 1101
m = 0000 0000 0001 0011
result = 0000 0100 0100 1101
</pre>
## Algorithm
<pre>
j i
n = 0000 0100 0011 1101
m = 0000 0000 0001 0011
lmask = 1111 1111 1111 1111 -1
lmask = 1111 1111 1000 0000 -1 << (j + 1)
rmask = 0000 0000 0000 0001 1
rmask = 0000 0000 0000 0100 1 << i
rmask = 0000 0000 0000 0011 (1 << i) -1
mask = 1111 1111 1000 0011 lmask | rmask
n = 0000 0100 0011 1101
mask = 1111 1111 1000 0011 n & mask
--------------------------------------------------
n2 = 0000 0100 0000 0001
n2 = 0000 0100 0000 0001
mask2 = 0000 0000 0100 1100 m << i
--------------------------------------------------
result = 0000 0100 0100 1101 n2 | mask2
</pre>
Complexity:
* Time: O(b), where b is the number of bits
* Space: O(b), where b is the number of bits
## Code
```
class Bits(object):

    def insert_m_into_n(self, m, n, i, j):
        """Insert m into n so that m occupies bits j down to i of n.

        Args:
            m: Value to insert (assumed to fit within j - i + 1 bits).
            n: Value receiving the insertion.
            i: Index of the least significant bit of the target range.
            j: Index of the most significant bit of the target range.

        Returns:
            n with bits j..i replaced by m.

        Raises:
            TypeError: If any argument is None.
            ValueError: If an index is negative or j < i.
        """
        if None in (m, n, i, j):
            raise TypeError('Argument cannot be None')
        if i < 0 or j < 0:
            raise ValueError('Index cannot be negative')
        # Robustness: the constraints allow invalid input, so reject an
        # inverted range instead of silently producing a nonsense mask.
        if j < i:
            raise ValueError('j must be greater than or equal to i')
        # Mask with ones everywhere except bits j..i (e.g. 1111 1000 0011)
        left_mask = -1 << (j + 1)
        right_mask = (1 << i) - 1
        n_mask = left_mask | right_mask
        # Clear bits from j to i, inclusive
        n_cleared = n & n_mask
        # Shift m into place before inserting it into n
        m_mask = m << i
        return n_cleared | m_mask
```
## Unit Test
```
%%writefile test_insert_m_into_n.py
import unittest


# NOTE(review): the written file never imports Bits; it relies on the
# `%run -i` below, which executes it in the interactive namespace where
# Bits is already defined.
class TestBit(unittest.TestCase):

    def test_insert_m_into_n(self):
        # Example from the algorithm section: insert m into bits 2..6 of n
        n = int('0000010000111101', base=2)
        m = int('0000000000010011', base=2)
        expected = int('0000010001001101', base=2)
        bits = Bits()
        self.assertEqual(bits.insert_m_into_n(m, n, i=2, j=6), expected)
        print('Success: test_insert_m_into_n')


def main():
    test = TestBit()
    test.test_insert_m_into_n()


if __name__ == '__main__':
    main()
%run -i test_insert_m_into_n.py
```
| github_jupyter |
```
! pip install annoy nmslib
%matplotlib inline
```
# Approximate nearest neighbors in TSNE
This example presents how to chain KNeighborsTransformer and TSNE in a
pipeline. It also shows how to wrap the packages `annoy` and `nmslib` to
replace KNeighborsTransformer and perform approximate nearest neighbors.
These packages can be installed with `pip install annoy nmslib`.
Note: Currently `TSNE(metric='precomputed')` does not modify the precomputed
distances, and thus assumes that precomputed euclidean distances are squared.
In future versions, a parameter in TSNE will control the optional squaring of
precomputed distances (see #12401).
Note: In KNeighborsTransformer we use the definition which includes each
training point as its own neighbor in the count of `n_neighbors`, and for
compatibility reasons, one extra neighbor is computed when
`mode == 'distance'`. Please note that we do the same in the proposed wrappers.
Sample output::
Benchmarking on MNIST_2000:
---------------------------
AnnoyTransformer: 0.583 sec
NMSlibTransformer: 0.321 sec
KNeighborsTransformer: 1.225 sec
TSNE with AnnoyTransformer: 4.903 sec
TSNE with NMSlibTransformer: 5.009 sec
TSNE with KNeighborsTransformer: 6.210 sec
TSNE with internal NearestNeighbors: 6.365 sec
Benchmarking on MNIST_10000:
----------------------------
AnnoyTransformer: 4.457 sec
NMSlibTransformer: 2.080 sec
KNeighborsTransformer: 30.680 sec
TSNE with AnnoyTransformer: 30.225 sec
TSNE with NMSlibTransformer: 43.295 sec
TSNE with KNeighborsTransformer: 64.845 sec
TSNE with internal NearestNeighbors: 64.984 sec
```
# Author: Tom Dupre la Tour
#
# License: BSD 3 clause
import time
import sys

# Both optional ANN backends are hard requirements for this example; bail
# out early with a clear message if either is missing.
try:
    import annoy
except ImportError:
    print("The package 'annoy' is required to run this example.")
    sys.exit()

try:
    import nmslib
except ImportError:
    print("The package 'nmslib' is required to run this example.")
    sys.exit()

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy.sparse import csr_matrix

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.neighbors import KNeighborsTransformer
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import fetch_openml
from sklearn.pipeline import make_pipeline
from sklearn.manifold import TSNE
from sklearn.utils import shuffle

print(__doc__)
class NMSlibTransformer(TransformerMixin, BaseEstimator):
    """Wrapper for using nmslib as sklearn's KNeighborsTransformer"""

    def __init__(self, n_neighbors=5, metric='euclidean', method='sw-graph',
                 n_jobs=1):
        self.n_neighbors = n_neighbors
        self.method = method
        self.metric = metric
        self.n_jobs = n_jobs

    def fit(self, X):
        """Build the nmslib index over the rows of X."""
        self.n_samples_fit_ = X.shape[0]

        # Map the sklearn metric name to the nmslib space name; see the
        # nmslib manual for further spaces:
        # https://github.com/nmslib/nmslib/tree/master/manual
        space_by_metric = {
            'sqeuclidean': 'l2',
            'euclidean': 'l2',
            'cosine': 'cosinesimil',
            'l1': 'l1',
            'l2': 'l2',
        }

        self.nmslib_ = nmslib.init(method=self.method,
                                   space=space_by_metric[self.metric])
        self.nmslib_.addDataPointBatch(X)
        self.nmslib_.createIndex()
        return self

    def transform(self, X):
        """Return the sparse distance-weighted k-neighbors graph of X."""
        n_queries = X.shape[0]

        # For compatibility reasons, as each sample is considered as its own
        # neighbor, one extra neighbor will be computed.
        k = self.n_neighbors + 1

        neighbors = self.nmslib_.knnQueryBatch(X, k=k,
                                               num_threads=self.n_jobs)
        ind, dist = zip(*neighbors)
        ind = np.vstack(ind)
        dist = np.vstack(dist)

        # The 'l2' space yields plain euclidean distances; square them when
        # squared euclidean distances were requested.
        if self.metric == 'sqeuclidean':
            dist **= 2

        row_ptr = np.arange(0, n_queries * k + 1, k)
        return csr_matrix((dist.ravel(), ind.ravel(), row_ptr),
                          shape=(n_queries, self.n_samples_fit_))
class AnnoyTransformer(TransformerMixin, BaseEstimator):
    """Wrapper for using annoy.AnnoyIndex as sklearn's KNeighborsTransformer"""

    def __init__(self, n_neighbors=5, metric='euclidean', n_trees=10,
                 search_k=-1):
        self.n_neighbors = n_neighbors
        self.n_trees = n_trees
        self.search_k = search_k
        self.metric = metric

    def fit(self, X):
        """Build the annoy index over the rows of X."""
        self.n_samples_fit_ = X.shape[0]
        # annoy has no 'sqeuclidean' space: index with 'euclidean' and
        # square the returned distances afterwards (in _transform).
        metric = 'euclidean' if self.metric == 'sqeuclidean' else self.metric
        self.annoy_ = annoy.AnnoyIndex(X.shape[1], metric=metric)
        for row, vector in enumerate(X):
            self.annoy_.add_item(row, vector.tolist())
        self.annoy_.build(self.n_trees)
        return self

    def transform(self, X):
        return self._transform(X)

    def fit_transform(self, X, y=None):
        return self.fit(X)._transform(X=None)

    def _transform(self, X):
        """As `transform`, but handles X is None for faster `fit_transform`."""
        n_queries = self.n_samples_fit_ if X is None else X.shape[0]

        # For compatibility reasons, as each sample is considered as its own
        # neighbor, one extra neighbor will be computed.
        k = self.n_neighbors + 1

        ind = np.empty((n_queries, k), dtype=int)
        dist = np.empty((n_queries, k))

        if X is None:
            # Query by stored item id, avoiding re-passing the vectors.
            for item in range(self.annoy_.get_n_items()):
                ind[item], dist[item] = self.annoy_.get_nns_by_item(
                    item, k, self.search_k, include_distances=True)
        else:
            for row, vector in enumerate(X):
                ind[row], dist[row] = self.annoy_.get_nns_by_vector(
                    vector.tolist(), k, self.search_k,
                    include_distances=True)

        if self.metric == 'sqeuclidean':
            dist **= 2

        row_ptr = np.arange(0, n_queries * k + 1, k)
        return csr_matrix((dist.ravel(), ind.ravel(), row_ptr),
                          shape=(n_queries, self.n_samples_fit_))
def test_transformers():
    """Test that AnnoyTransformer and KNeighborsTransformer give same results
    """
    X = np.random.RandomState(42).randn(10, 2)

    Xt_exact = KNeighborsTransformer().fit_transform(X)
    Xt_annoy = AnnoyTransformer().fit_transform(X)
    Xt_nmslib = NMSlibTransformer().fit_transform(X)

    # Both approximate backends must agree with the exact neighbors graph.
    assert_array_almost_equal(Xt_exact.toarray(), Xt_annoy.toarray(), decimal=5)
    assert_array_almost_equal(Xt_exact.toarray(), Xt_nmslib.toarray(), decimal=5)
def load_mnist(n_samples):
    """Fetch MNIST from OpenML, shuffle deterministically, keep n_samples."""
    dataset = fetch_openml("mnist_784")
    data, target = shuffle(dataset.data, dataset.target, random_state=2)
    # Scale pixel values into [0, 1] before returning the requested subset.
    return data[:n_samples] / 255, target[:n_samples]
def run_benchmark():
    """Time each transformer / t-SNE pipeline on MNIST subsets and plot embeddings."""
    datasets = [
        ('MNIST_2000', load_mnist(n_samples=2000)),
        ('MNIST_10000', load_mnist(n_samples=10000)),
    ]
    n_iter = 500
    perplexity = 30
    # TSNE requires a certain number of neighbors which depends on the
    # perplexity parameter.
    # Add one since we include each sample as its own neighbor.
    n_neighbors = int(3. * perplexity + 1) + 1
    transformers = [
        ('AnnoyTransformer', AnnoyTransformer(n_neighbors=n_neighbors,
                                              metric='sqeuclidean')),
        ('NMSlibTransformer', NMSlibTransformer(n_neighbors=n_neighbors,
                                                metric='sqeuclidean')),
        ('KNeighborsTransformer', KNeighborsTransformer(
            n_neighbors=n_neighbors, mode='distance', metric='sqeuclidean')),
        ('TSNE with AnnoyTransformer', make_pipeline(
            AnnoyTransformer(n_neighbors=n_neighbors, metric='sqeuclidean'),
            TSNE(metric='precomputed', perplexity=perplexity,
                 method="barnes_hut", random_state=42, n_iter=n_iter), )),
        ('TSNE with NMSlibTransformer', make_pipeline(
            NMSlibTransformer(n_neighbors=n_neighbors, metric='sqeuclidean'),
            TSNE(metric='precomputed', perplexity=perplexity,
                 method="barnes_hut", random_state=42, n_iter=n_iter), )),
        ('TSNE with KNeighborsTransformer', make_pipeline(
            KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance',
                                  metric='sqeuclidean'),
            TSNE(metric='precomputed', perplexity=perplexity,
                 method="barnes_hut", random_state=42, n_iter=n_iter), )),
        ('TSNE with internal NearestNeighbors',
         TSNE(metric='sqeuclidean', perplexity=perplexity, method="barnes_hut",
              random_state=42, n_iter=n_iter)),
    ]
    # init the plot: one subplot per (dataset, TSNE pipeline) pair
    nrows = len(datasets)
    ncols = np.sum([1 for name, model in transformers if 'TSNE' in name])
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, squeeze=False,
                             figsize=(5 * ncols, 4 * nrows))
    axes = axes.ravel()
    i_ax = 0
    for dataset_name, (X, y) in datasets:
        msg = 'Benchmarking on %s:' % dataset_name
        print('\n%s\n%s' % (msg, '-' * len(msg)))
        for transformer_name, transformer in transformers:
            start = time.time()
            Xt = transformer.fit_transform(X)
            duration = time.time() - start
            # print the duration report, padded so times line up in a column
            longest = np.max([len(name) for name, model in transformers])
            whitespaces = ' ' * (longest - len(transformer_name))
            print('%s: %s%.3f sec' % (transformer_name, whitespaces, duration))
            # plot TSNE embedding which should be very similar across methods
            if 'TSNE' in transformer_name:
                axes[i_ax].set_title(transformer_name + '\non ' + dataset_name)
                axes[i_ax].scatter(Xt[:, 0], Xt[:, 1], c=y.astype(np.int32),
                                   alpha=0.2, cmap=plt.cm.viridis)
                axes[i_ax].xaxis.set_major_formatter(NullFormatter())
                axes[i_ax].yaxis.set_major_formatter(NullFormatter())
                axes[i_ax].axis('tight')
                i_ax += 1
    fig.tight_layout()
    plt.show()
# Run the agreement check first, then the timing benchmark with plots.
if __name__ == '__main__':
    test_transformers()
    run_benchmark()
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 構造化されたデータの分類
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/beta/tutorials/keras/feature_columns">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/beta/tutorials/keras/feature_columns.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ja/beta/tutorials/keras/feature_columns.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
</table>
Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [docs-ja@tensorflow.org メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。
このチュートリアルでは、(例えばCSVファイルに保存された表形式データのような)構造化されたデータをどうやって分類するかを示します。ここでは、モデルの定義に[Keras](https://www.tensorflow.org/guide/keras)を、[feature columns](https://www.tensorflow.org/guide/feature_columns)をCSVファイルの列をモデルを訓練するための特徴量にマッピングするための橋渡し役として使用します。このチュートリアルには、下記のことを行うコードすべてが含まれています。
* [Pandas](https://pandas.pydata.org/)を使用したCSVファイルの読み込み
* [tf.data](https://www.tensorflow.org/guide/datasets)を使用して行データをシャッフルし、バッチ化するための入力パイプライン構築
* feature columnsを使ったCSVの列のモデル訓練用の特徴量へのマッピング
* Kerasを使ったモデルの構築と、訓練及び評価
## データセット
ここでは、Cleveland Clinic Foundation for Heart Diseaseが提供している小さな[データセット](https://archive.ics.uci.edu/ml/datasets/heart+Disease)を使用します。このCSVファイルには数百行が含まれています。行が患者を、列がその属性を表します。この情報を使用して、患者が心臓疾患を持っているかを予測します。このデータセットの場合には二値分類タスクとなります。
下記はこのデータセットの[説明](https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/heart-disease.names)です。数値列とカテゴリー列があることに注目してください。
>列| 説明| 特徴量の型 | データ型
>------------|--------------------|----------------------|-----------------
>Age | 年齢 | 数値型 | 整数
>Sex | (1 = 男性; 0 = 女性) | カテゴリー型 | 整数
>CP | 胸痛のタイプ (0, 1, 2, 3, 4) | カテゴリー型 | 整数
>Trestbpd | 安静時血圧 (単位:mm Hg 入院時) | 数値型 | 整数
>Chol | 血清コレステロール 単位:mg/dl | 数値型 | 整数
>FBS | (空腹時血糖 > 120 mg/dl) (1 = 真; 0 = 偽) | カテゴリー型 | 整数
>RestECG | 安静時心電図の診断結果 (0, 1, 2) | カテゴリー型 | 整数
>Thalach | 最大心拍数 | 数値型 | 整数
>Exang | 運動誘発狭心症 (1 = はい; 0 = いいえ) | カテゴリー型 | 整数
>Oldpeak | 安静時と比較した運動時のST低下 | 数値型 | 整数
>Slope | ピーク運動STセグメントの勾配 | 数値型 | 浮動小数点数
>CA | 蛍光透視法によって着色された主要血管の数(0−3) | 数値型 | 整数
>Thal | 3 = 正常; 6 = 固定欠陥; 7 = 可逆的欠陥 | カテゴリー型 | 文字列
>Target | 心臓疾患の診断 (1 = 真; 0 = 偽) | 分類 | 整数
## TensorFlow他ライブラリのインポート
```
!pip install sklearn
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
try:
# Colab only
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
```
## Pandasを使ったデータフレーム作成
[Pandas](https://pandas.pydata.org/)は、構造化データの読み込みや操作のための便利なユーティリティを持つPythonのライブラリです。ここでは、Pandasを使ってURLからデータをダウンロードし、データフレームに読み込みます。
```
# Download the heart-disease CSV and load it into a pandas DataFrame.
URL = 'https://storage.googleapis.com/applied-dl/heart.csv'
dataframe = pd.read_csv(URL)
dataframe.head()
```
## データフレームを、訓練用、検証用、テスト用に分割
ダウンロードしたデータセットは1つのCSVファイルです。これを、訓練用、検証用、テスト用のデータセットに分割します。
```
# 80/20 split into train/test, then carve a validation set out of train.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
```
## tf.dataを使った入力パイプラインの構築
次に、[tf.data](https://www.tensorflow.org/guide/datasets)を使ってデータフレームをラップします。こうすることで、feature columns をPandasデータフレームの列をモデル訓練用の特徴量へのマッピングするための橋渡し役として使うことができます。(メモリに収まらないぐらいの)非常に大きなCSVファイルを扱う場合には、tf.dataを使ってディスクから直接CSVファイルを読み込むことになります。この方法は、このチュートリアルでは扱いません。
```
# Pandasデータフレームからtf.dataデータセットを作るためのユーティリティメソッド
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
batch_size = 5 # デモ用として小さなバッチサイズを使用
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
```
## 入力パイプラインを理解する
入力パイプラインを構築したので、それが返すデータのフォーマットを見るために呼び出してみましょう。出力を読みやすくするためにバッチサイズを小さくしてあります。
```
# Pull one batch to inspect the dict-of-columns format the pipeline yields.
for feature_batch, label_batch in train_ds.take(1):
    print('Every feature:', list(feature_batch.keys()))
    print('A batch of ages:', feature_batch['age'])
    print('A batch of targets:', label_batch )
```
データセットが(データフレームにある)列名からなるディクショナリを返すことがわかります。列名から、データフレームの行に含まれる列の値が得られます。
## feature columnsの様々な型の例
TensorFlowにはたくさんの型のfeature columnがあります。このセクションでは、いくつかの型のfeature columnsを作り、データフレームの列をどのように変換しているかを示します。
```
# Use this batch to demonstrate several types of feature columns.
example_batch = next(iter(train_ds))[0]

# Utility method that creates a feature column
# and transforms a batch of data with it.
def demo(feature_column):
    feature_layer = layers.DenseFeatures(feature_column)
    print(feature_layer(example_batch).numpy())
```
### 数値コラム
feature columnsの出力はモデルへの入力になります(上記で定義したdemo関数を使うと、データフレームの列がどのように変換されるかをつぶさに見ることができます)。[数値コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/numeric_column)は、最も単純な型のコラムです。数値コラムは実数特徴量を表現するのに使われます。このコラムを使う場合、モデルにはデータフレームの列の値がそのまま渡されます。
```
# A numeric column passes the raw 'age' values straight through to the model.
age = feature_column.numeric_column("age")
demo(age)
```
心臓疾患データセットでは、データフレームのほとんどの列が数値型です。
### バケット化コラム
数値をそのままモデルに入力するのではなく、値の範囲に基づいた異なるカテゴリーに分割したいことがあります。例えば、人の年齢を表す生データを考えてみましょう。[バケット化コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column)を使うと年齢を数値コラムとして表現するのではなく、年齢をいくつかのバケットに分割できます。下記のワンホット値が、各行がどの年齢範囲にあるかを表していることに注目してください。
```
# Bucketize age into one-hot encoded ranges instead of feeding the raw value.
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets)
```
### カテゴリー型コラム
このデータセットでは、Thalは('fixed'、'normal'、'reversible'のような)文字列として表現されています。文字列を直接モデルに入力することはできません。まず、文字列を数値にマッピングする必要があります。categorical vocabulary コラムを使うと、(上記で示した年齢バケットのように)文字列をワンホットベクトルとして表現することができます。カテゴリーを表す語彙(vocabulary)は[categorical_column_with_vocabulary_list](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list)を使ってリストで渡すか、[categorical_column_with_vocabulary_file](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_file)を使ってファイルから読み込むことができます。
```
# Map the 'thal' strings onto a fixed vocabulary, then one-hot encode them.
thal = feature_column.categorical_column_with_vocabulary_list(
    'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
demo(thal_one_hot)
```
より複雑なデータセットでは、たくさんの列がカテゴリー型(例えば文字列)であることでしょう。feature columns はカテゴリー型データを扱う際に最も役に立ちます。このデータセットでは、カテゴリー型コラムは1つだけですが、他のデータセットを扱う際に使用できるいくつかの重要な型のfeature columnsを紹介するために、この列を使用することにします。
### 埋め込み型コラム
数種類の候補となる文字列ではなく、カテゴリー毎に数千(あるいはそれ以上)の値があるとしましょう。カテゴリーの数が多くなってくると、様々な理由から、ワンホットエンコーディングを使ってニューラルネットワークを訓練することが難しくなります。埋込み型コラムを使うと、こうした制約を克服することが可能です。[埋込み型コラム](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column)は、データを多次元のワンホットベクトルとして表すのではなく、セルの値が0か1かだけではなく、どんな数値でもとれるような密な低次元ベクトルとして表現します。埋め込みのサイズ(下記の例では8)は、チューニングが必要なパラメータです。
キーポイント:カテゴリー型コラムがたくさんの選択肢を持つ場合、埋め込み型コラムを使用することが最善の方法です。ここでは例を一つ示しますので、今後様々なデータセットを扱う際には、この例を参考にしてください。
```
# この埋込み型コラムの入力は、先程作成したカテゴリ型コラムであることに注意
# Notice that the input to the embedding column is the categorical
# column we previously created.
thal_embedding = feature_column.embedding_column(thal, dimension=8)
demo(thal_embedding)
```
### ハッシュ化特徴コラム
値の種類が多いカテゴリー型コラムを表現するもう一つの方法が、[categorical_column_with_hash_bucket](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_hash_bucket)を使う方法です。このfeature columnは文字列をエンコードするために入力のハッシュ値を計算し、`hash_bucket_size`個のバケットの中から1つを選択します。このコラムを使用する場合には、語彙を用意する必要はありません。また、スペースの節約のために、実際のカテゴリー数に比べて極めて少ないバケット数を選択することも可能です。
キーポイント:この手法の重要な欠点の一つは、異なる文字列が同じバケットにマッピングされるというハッシュ値の衝突が起きることです。実務上は、データセットによっては、この問題を無視できることがあります。
```
# Hash 'thal' into 1000 buckets instead of supplying an explicit vocabulary.
thal_hashed = feature_column.categorical_column_with_hash_bucket(
    'thal', hash_bucket_size=1000)
demo(feature_column.indicator_column(thal_hashed))
```
### クロスフィーチャーコラム
複数の特徴量をまとめて1つの特徴量にする、[フィーチャークロス](https://developers.google.com/machine-learning/glossary/#feature_cross)として知られている手法は、モデルが特徴量の組み合わせの一つ一つに別々の重みを学習することを可能にします。ここでは年齢とThalをクロスさせて新しい特徴量を作ってみます。交差列(`crossed_column`)が、起こりうるすべての組み合わせ全体のテーブル(これは非常に大きくなる可能性があります)を作るものではないことに注意してください。クロスフィーチャーコラムは、代わりにバックエンドとしてハッシュ化コラムを使用しているため、テーブルの大きさを選択することができます。
```
# Cross age buckets with 'thal' so the model can learn per-combination weights.
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(feature_column.indicator_column(crossed_feature))
```
## 使用するコラムを選択する
これまで、いくつかのfeature columnの使い方を見てきました。いよいよモデルの訓練にそれらを使用することにします。このチュートリアルの目的は、feature columnsを使うのに必要な完全なコード(いわば力学)を示すことです。以下ではモデルを訓練するための列を適当に選びました。
キーポイント:正確なモデルを構築するのが目的である場合には、できるだけ大きなデータセットを使用して、どの特徴量を含めるのがもっとも意味があるのかや、それらをどう表現したらよいかを、慎重に検討してください。
```
# Collect the feature columns that will feed the model.
feature_columns = []

# numeric columns
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
    feature_columns.append(feature_column.numeric_column(header))

# bucketized column
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)

# indicator (categorical) column
thal = feature_column.categorical_column_with_vocabulary_list(
    'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)

# embedding column
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)

# crossed feature column
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
```
### 特徴量層の構築
feature columnsを定義し終わったので、次に[DenseFeatures](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/DenseFeatures)層を使ってKerasモデルへの入力とします。
```
# DenseFeatures turns the list of feature columns into a Keras input layer.
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
```
これまでは、feature columnsの働きを見るため、小さなバッチサイズを使ってきました。ここではもう少し大きなバッチサイズの新しい入力パイプラインを作ります。
```
# Rebuild the input pipelines with a realistic training batch size.
batch_size = 32
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
```
## モデルの構築、コンパイルと訓練
```
# Binary classifier: feature layer -> two dense ReLU layers -> sigmoid output.
model = tf.keras.Sequential([
    feature_layer,
    layers.Dense(128, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(train_ds,
          validation_data=val_ds,
          epochs=5)

# Evaluate on the held-out test split.
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
```
キーポイント:一般的に、ディープラーニングが最良の結果となるのは、もっと大きくて、もっと複雑なデータセットです。この例のように小さなデータセットを使用する際には、強固なベースラインとして、決定木やランダムフォレストを使うことをおすすめします。このチュートリアルの目的は、訓練により正確なモデルを得ることではなく、構造化データの使い方をデモすることです。今後ご自分のデータセットに取り組まれる際の出発点として、これらのコードをお使いください。
## 次のステップ
構造化データの分類について更に多くのことを学ぶためには、自分自身で試してみることです。別のデータセットを見つけ、上記と同様のコードを使って、それを分類するモデルを訓練してみてください。正解率を上げるためには、モデルにどの特徴量を含めたらよいかや、その特徴量をどのように表現すべきかをじっくり考えてください。
| github_jupyter |
## Importación de librerías
```
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
```
## Cargo el dataset
```
# Load the properati listings, using the first CSV column as the index.
data = pd.read_csv('../Data/properatti.csv',index_col=0)
#data.head()
```
## Valores Faltantes del dataset
```
# Count missing values per column, inspect rows that lack a description,
# and drop them — the description text is mined later for floor information.
data.isnull().sum()
data.loc[data['description'].isna(),['price','currency','price_aprox_usd','price_usd_per_m2','rooms','floor','expenses']]
nodesc_index = data.loc[data['description'].isna()].index
data.drop(index=nodesc_index,inplace=True)
data.isna().sum()
```
## Tipos de datos
```
# Inspect the dtype of each column.
data.dtypes
```
# Columna Currency
¿La moneda de publicación es pesos argentinos y dólares? ¿O hay otra?
```
# Which currencies appear among the listings?
data['currency'].value_counts()
```
Las publicaciones en Sol Peruano y Peso Uruguayo se deberán pasar a dólares.
### Tipo de Cambio (conversión de pesos a dolares y viceversa)
```
# Derive the implied ARS/USD exchange rate from rows priced in USD.
data['currency']=="USD"
data_USD= data.loc[data['currency']=="USD", ["price", "price_aprox_local_currency","price_aprox_usd"]]
data_USD.head(5)
y=data_USD["price_aprox_local_currency"]/data_USD['price_aprox_usd']
y=y.round(decimals=4)
y.unique()
```
1 dolar = 17.6445 pesos argentinos. (A este precio, yo compro!)
```
#cambio_dolar_a_peso = 17.6445
def dolar_a_peso(price_usd, tipo_de_cambio=17.6445):
    """Convert a USD price to ARS.

    Parameters
    ----------
    price_usd : float or pandas.Series
        Price expressed in US dollars.
    tipo_de_cambio : float, optional
        ARS-per-USD rate. Defaults to 17.6445, the rate implied by the
        dataset's own price columns (see the cell above).
    """
    return price_usd * tipo_de_cambio
def peso_a_dolar(price_ars, tipo_de_cambio=17.6445):
    """Convert an ARS price to USD (inverse of dolar_a_peso).

    Parameters
    ----------
    price_ars : float or pandas.Series
        Price expressed in Argentine pesos.
    tipo_de_cambio : float, optional
        ARS-per-USD rate; defaults to the dataset-implied 17.6445.
    """
    return price_ars / tipo_de_cambio
```
Las filas donde falta el precio en pesos, tambien falta el precio en dólares:
```
# Rows missing the ARS price are also missing the USD price.
data.loc[(data['price_aprox_local_currency'].isna())&(data['price_aprox_usd'].isna())].shape
# data.loc[(data['price'].notnull())&(data['currency'].notnull())&(data['currency']=='ARS')&(data['price']!=data['price_aprox_local_currency']),['price','currency','price_aprox_local_currency','price_aprox_usd']].head(10)
```
# Precio por metro cuadrado
En cuanto al metro cuadrado, solo en 28295 filas falta el valor de ambos. Podemos calcular el valor del precio por metro cuadrado de una columna a partir de la otra.
```
# Count rows lacking both per-m2 prices, then sample USD- and ARS-priced
# rows to see how the price columns relate to the surface columns.
data.loc[(data['price_usd_per_m2'].isna())&(data['price_per_m2'].isna())].shape
data.loc[(data['currency']=='USD'),['currency','price_aprox_local_currency','price','surface_covered_in_m2','price_per_m2',\
'price_aprox_usd','surface_total_in_m2','price_usd_per_m2']].head(10)
data.loc[(data['currency']=='ARS'),['currency','price_aprox_local_currency','price','surface_covered_in_m2','price_per_m2',\
'price_aprox_usd','surface_total_in_m2','price_usd_per_m2']].head(10)
```
$price\_usd\_per\_m2 = \frac{price\_aprox\_usd}{surface\_total\_in\_m2}$
$price\_per\_m2 = \frac{price}{surface\_covered\_in\_m2}$
El problema se da que a veces la superficie total es menor que la superficie cubierta. Por lo tanto, se decidió dropear estas 1106 filas.
```
# Listings whose covered surface exceeds the total surface are invalid: drop them.
data.loc[data['surface_covered_in_m2']>data['surface_total_in_m2']]#.shape
sup_invalida = data.loc[data['surface_covered_in_m2']>data['surface_total_in_m2']].index
data.drop(index=sup_invalida,inplace=True)
# Rows where one per-m2 price could be derived from price and surface.
data.loc[(data['price_usd_per_m2'].isna())&(data['price_aprox_usd'].notnull())&(data['surface_total_in_m2'].notnull())\
&(data['surface_total_in_m2']!=0),
['price_usd_per_m2','price_aprox_usd','surface_total_in_m2']]
data.loc[(data['price_per_m2'].isna())&(data['price'].notnull())&(data['surface_covered_in_m2'].notnull())\
,['price_per_m2','price','surface_covered_in_m2']]
# Zero surfaces are treated as missing values.
data.loc[data['surface_total_in_m2']==0,'surface_total_in_m2']=np.nan
data.loc[data['surface_covered_in_m2']==0,'surface_covered_in_m2']=np.nan
```
# Valores faltantes de Currency
```
# Check whether missing 'currency' can be inferred from the price columns.
data.loc[(data['currency'].isna())&(data['price_aprox_local_currency'].isna())&(data['price_aprox_usd'].isna()),['price','price_aprox_local_currency','price_aprox_usd']]
data.loc[(data['currency'].isna())&(data['price_aprox_local_currency'].notnull())&(data['price_aprox_usd'].notnull()),['price','price_aprox_local_currency','price_aprox_usd']]
```
De lo visto, no puedo calcular valores faltantes de currency a partir de price, price_aprox_usd y price_aprox_local_currency
## Cálculo de superficie, precio y precio por m2
La idea es ver en que filas tengo 2 valores y obtener el tercero.
Precio por m2 = Precio / Superficie.
Para esto, es necesario verificar primero que los valores sean válidos, es decir, valores no nulos.
**Verificación valores de superficie**
```
# Total surfaces below 10 m2 are considered data-entry errors -> set to NaN.
min_sup_valida = 10
data.loc[data['surface_total_in_m2']<min_sup_valida,'surface_total_in_m2']=np.nan
```
**Verificación valores de precio por m2**
Después de un análisis de la información, no se encuentra un patrón para establecer un mínimo o un máximo para verificar la validez de un dato. Mientras no haya valores nulos (cero), se dejará todo como está.
```
# Inspect suspiciously low USD-per-m2 values (no validity threshold was found).
#min_pricem2_valido =
data.loc[data['price_usd_per_m2']<=5,'price_usd_per_m2'].value_counts()
```
**Verificación valores de precio en dolares**
Sólo se encontró un valor nulo (cero).
```
# USD prices of 4000 or less look like data errors; blank them out.
print(data.loc[data['price_aprox_usd']<=4000,['price_aprox_usd','description']].values)
data.loc[data['price_aprox_usd']<=4000,'price_aprox_usd']=np.nan
```
Si hay valor de precio y hay valor superficie total, se puede calcular el valor del precio por metro cuadrado.
Pero este caso no ocurre.
```
# Rows with price and total surface but no USD per-m2 price: none found.
data.loc[(data['price'].notnull())&(data['surface_total_in_m2'].notnull())&(data['price_usd_per_m2'].isna())].shape
```
Si hay valor de precio y hay valor de precio por metro cuadrado, se puede calcular el valor de la superficie. Pero este caso no ocurre.
```
# Rows with price and USD per-m2 price but no total surface: none found.
data.loc[(data['price'].notnull())&(data['surface_total_in_m2'].isna())&(data['price_usd_per_m2'].notnull())].shape
```
Si hay valor de superficie total y hay valor de precio por metro cuadrado, se puede calcular el valor del precio.
Pero este caso no ocurre.
```
# Rows with total surface and USD per-m2 price but no price: none found.
data.loc[(data['price'].isna())&(data['surface_total_in_m2'].notnull())&(data['price_usd_per_m2'].notnull())].shape
```
# Valores faltantes de 'place_name'
Se aprecia que todos los valores faltantes de 'place_name' corresponden a Tigre. Podríamos generar una función para tomar este valor de la columna 'place_with_parent_names', pero lo vamos a hardcodear.
```
# Rows missing 'place_name' — their parent-names chain identifies the place.
data.loc[data['place_name'].isna(),['place_name','place_with_parent_names','country_name','state_name']]
def get_city(place):
    """Return the last non-empty segment of a '|'-delimited place string."""
    segments = [segment for segment in place.split(sep='|') if len(segment) > 0]
    return segments[-1]
def get_place_names(text):
    """Drop the last non-empty segment of a '|'-delimited place string.

    The remaining segments are re-joined with '|' and wrapped in the same
    leading and trailing '|' as the original format.
    """
    segments = [segment for segment in text.split(sep='|') if len(segment) > 0]
    parents = segments[0:-1]
    return '|' + '|'.join(parents) + '|'
# All missing place names were seen to be Tigre; fill them and list uniques.
data['place_name'].fillna('Tigre',inplace=True)
data.groupby('place_name')['place_name'].unique()
```
Vemos un lugar que se llama 'coordenadas 34.255511'. Vamos a verificar qué lugar es en realidad.
```
# What place hides behind the bogus name 'coordenadas 34.255511'?
data.loc[data['place_name']=='coordenadas 34.255511','place_with_parent_names'].values
```
Es Tigre. También hay que arreglarlo.
```
# It is Tigre: repair both the parent-names chain and the place name.
data.loc[data['place_name']=='coordenadas 34.255511','place_with_parent_names']=get_place_names(data.loc[data['place_name']=='coordenadas 34.255511','place_with_parent_names'].values[0])
data.loc[data['place_name']=='coordenadas 34.255511','place_name']='Tigre'
data.loc[data['place_name']=='coordenadas 34.255511']
```
# Valores Faltantes de 'floor'
### Funciones para Limpieza y Búsqueda de Datos
```
#!pip install unidecode
import unidecode
def quitar_caracteres(entrada):
    """Strip accents (via unidecode) and lowercase the given string."""
    sin_acentos = unidecode.unidecode(entrada)
    return sin_acentos.lower()
# def pb_search(text):
# pattern_fl = r'(?:planta baja|pb|p.b.)'
# regex_fl = re.compile(pattern_fl, flags = re.IGNORECASE)
# floors_lst = regex_fl.findall(text)
#Revisar y ver que pasa con varias ocurrencias
def text_floor_search(text):
    """Extract a floor number from ordinal words like 'tercer piso'.

    Returns the floor as a float, or NaN unless there is exactly one
    recognized ordinal word immediately preceding 'piso'.
    """
    ordinales = {
        'primer': 1.0, 'segundo': 2.0, 'tercer': 3.0, 'cuarto': 4.0, 'quinto': 5.0,
        'sexto': 6.0, 'septimo': 7.0, 'octavo': 8.0, 'noveno': 9.0, 'decimo': 10.0,
    }
    matches = re.findall(r'(\w{2,7})\s?(?:piso)', text, flags=re.IGNORECASE)
    if len(matches) != 1:
        return np.nan
    palabra = quitar_caracteres(matches[0])
    return ordinales.get(palabra, np.nan)
#Revisar y ver que pasa con varias ocurrencias
def text_search_floor(text):
    """Extract a floor number from number words following 'piso' (e.g. 'piso tres').

    Returns the first recognized number word (after accent/case
    normalization) as a float, or NaN when none is found.
    """
    numeros = {
        'un': 1.0, 'uno': 1.0, 'dos': 2.0, 'tres': 3.0, 'cuatro': 4.0, 'cinco': 5.0,
        'seis': 6.0, 'siete': 7.0, 'ocho': 8.0, 'nueve': 9.0, 'diez': 10.0, 'once': 11.0,
        'doce': 12.0, 'trece': 13.0, 'catorce': 14.0, 'quince': 15.0,
    }
    matches = re.findall(r'(?:piso)\s?(\w{2,7})', text, flags=re.IGNORECASE)
    # Bug fix: the original only normalized (accents/case) in the
    # single-match path; with multiple matches it compared raw strings
    # against the map. Normalize every candidate consistently, and drop
    # the unreachable `len(floors_lst)==0` branch and the pointless copy.
    for match in matches:
        palabra = quitar_caracteres(match)
        if palabra in numeros:
            return numeros[palabra]
    return np.nan
#Revisar y ver que pasa con varias ocurrencias
def num_floor_search(text):
    """Extract a floor number from patterns like '3er piso' or '2° piso'.

    Returns NaN unless there is exactly one match, the ordinal suffix is
    recognized, and the floor number is at most 15.
    """
    # unidecode maps '°' to 'deg', hence its presence among the suffixes.
    sufijos = ['er', 'do', 'to', 'mo', 'vo', 'no', 'deg', 'ero', '']
    matches = re.findall(r'(\d{1,2})\.?(°|\w{2})\s?(?:piso)', text,
                         flags=re.IGNORECASE)
    if len(matches) != 1:
        return np.nan
    digitos, sufijo_crudo = matches[0]
    if quitar_caracteres(sufijo_crudo) not in sufijos:
        return np.nan
    piso = float(digitos)
    return np.nan if piso > 15 else piso
#Revisar y ver que pasa con varias ocurrencias
def num_search_floor(text):
    """Extract a floor number from patterns like 'piso12' (digits right after 'piso').

    Returns NaN unless there is exactly one match and the floor is at most 40.
    """
    matches = re.findall(r'\s?(?:piso)(\d{1,2})', text, flags=re.IGNORECASE)
    if len(matches) != 1:
        return np.nan
    piso = float(matches[0])
    return np.nan if piso > 40 else piso
def clean_temp_col(df, tempCol):
    """Reset the temporary column `tempCol` of `df` to NaN on every row.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe whose temporary column is reset in place.
    tempCol : str
        Name, as a string, of the column to overwrite with NaN.
    """
    df.loc[:, tempCol] = np.nan
def new_found(df, tempCol, cleanCol):
    """Print how many values the last pass found and the accumulated total.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to take data from.
    tempCol : str
        Column holding the values found by the current search pass.
    cleanCol : str
        Column accumulating every value found so far.
    """
    nuevos = df[tempCol].notnull().sum()
    total = df[cleanCol].notnull().sum()
    print("Nuevos: {}.".format(nuevos))
    print("Total: {}.".format(total))
import scipy.stats as stats
#valor_mas_chico=0
def calculate_iqr_values(array):
    """Calculate inferior and superior outlier limits from the IQR.

    Returns
    ---------
    lim_inf: float
        q1 - 1.5 * iqr
    lim_sup: float
        q3 + 1.5 * iqr
    """
    q1, q3 = np.percentile(array, [25, 75])
    margen = 1.5 * stats.iqr(array)
    return q1 - margen, q3 + margen
def check_outliers(x, min_lim, max_lim):
    """Return x when it lies within [min_lim, max_lim], otherwise NaN."""
    if min_lim <= x <= max_lim:
        return x
    return np.nan
```
## Tipos de propiedad
El número de piso tiene mayor sentido e importancia para las propiedades de tipo 'apartment' que para house, PH y store, las cuales suelen ser en planta baja o primer piso.
```
# Distribution of listings per property type.
data['property_type'].value_counts()
```
## Análisis de 'floor' para Casas
El valor de piso para una casa suele ser 0 (PB). Hay casas que se encuentran en un primer piso al subir por una escalera. Por lo cual, se decidió poner en cero el piso de todas las casas que digan estar en un segundo piso o superior.
**¿En qué piso dice que están las casas?**
Si nos fijamos hay valores que carecen de sentido y se le atribuye a un error en el cargado de los datos.
```
#data.loc[data['property_type']=='house']['floor'].value_counts()
# Suppress scientific notation and list the raw floor values reported for houses.
np.set_printoptions(suppress=True)
np.sort(data.loc[data['property_type']=='house']['floor'].unique())
data.loc[data['property_type']=='house']['floor'].unique()
```
Con un chequeo 'automático' de outliers, reduzco los valores a los siguientes:
```
# NOTE(review): these IQR limits are computed from PH rows but applied to
# houses — presumably the filter should be property_type=='house'; confirm.
iqr_values=calculate_iqr_values(data.loc[(data['floor'].notnull())&(data['property_type']=='PH'),'floor'])
data.loc[(data['property_type']=='house')&(data['floor'].notnull()),'floor'].apply(check_outliers,args=iqr_values).unique()
```
Si bien algunos valores no parecen tener sentido, como una casa en el piso 6 o 7, luego de revisar las descripciones se vio que algunas están mal catalogadas como casas (y son departamentos), o se habla de dúplex en el piso 7. Por lo tanto, se decide dejar estos como valores válidos, y al resto asignarles NaN.
## Corrección de los datos de Casas
```
#data.loc[(data['property_type']=='house') & (data['floor']==1)]['description'].values
# NOTE(review): `iqr_values_house` is computed from PH rows and then never
# used — the filter below applies `iqr_values` instead. Confirm intent.
iqr_values_house=calculate_iqr_values(data.loc[(data['floor'].notnull())&(data['property_type']=='PH'),'floor'])
data.loc[(data['property_type']=='house')&(data['floor']>iqr_values[1]),'floor']=np.nan
# print(data.loc[data['property_type']=='house','floor'].value_counts(dropna=False))
# Houses whose description mentions a first floor reached by stairs get floor 1
# (the contains-match is cast to 1.0 and added onto the NaN floor).
variantes = ['1er piso por escalera','1° piso por escalera','1 piso por escalera',
'primer piso por escalera','planta alta por escalera']
for text in variantes:
    house_PA=data.loc[(data['property_type']=='house')&(data['floor'].isna())]['description']\
    .str.contains(text,case=False,regex=False).astype(float).apply(lambda x: x if x else np.nan)
    #data.loc[(data['property_type']=='house')&(data['floor'].isna()),'floor'].fillna(house_PA,inplace=True)
    data.loc[(data['property_type']=='house')&(data['floor'].isna()),'floor']=data.loc[(data['property_type']=='house')&(data['floor'].isna()),'floor'].add(house_PA,fill_value=0)
#print(data.loc[data['property_type']=='house','floor'].value_counts(dropna=False))
```
¿Y ahora?
```
#data.loc[data['property_type']=='house']['floor'].value_counts()
# Re-inspect house floor values after the cleanup above.
np.set_printoptions(suppress=True)
np.sort(data.loc[data['property_type']=='house']['floor'].unique())
data.loc[data['property_type']=='house']['floor'].value_counts(dropna=False, sort=True)
```
Los valores faltantes de casas (39456 datos) los completo con 0 (las casas suelen estar en PB).
```
# Houses with no floor information default to ground level (0).
data.loc[(data['property_type']=='house')&(data['floor'].isnull()),'floor']=0
data.loc[data['property_type']=='house']['floor'].value_counts(dropna=False, sort=True)
```
## Análisis de 'floor' para PH
El valor de piso de un PH suele ser PB o 1° piso. Pero hay casos donde se ve que está en un segundo o un tercer piso. Para mayor número de piso, encontramos muchos valores incorrectos. Por eso, un número de piso 4 o mayor, será tomado como incorrecto y se pondrá a cero, para realizar una corrección similar a la hecha con las casas
```
np.sort(data.loc[data['property_type']=='PH']['floor'].unique())
#data.loc[data['property_type']=='PH']['floor'].value_counts(dropna=False, sort=True)
# NOTE(review): `iqr_values_ph` is computed here, but the outlier check below
# applies `iqr_values` (left over from the house section) — confirm which
# limits are intended.
iqr_values_ph=calculate_iqr_values(data.loc[(data['floor'].notnull())&(data['property_type']=='PH'),'floor'])
data.loc[(data['property_type']=='PH')&(data['floor'].notnull()),'floor'].apply(check_outliers,args=iqr_values).unique()
```
Por outliers elimino una buena cantidad de valores extraños para el número de piso de un PH.
```
#data.loc[(data['property_type']=='PH')&(data['floor']<=iqr_values[1]),['floor','description']].values
```
## Corrección de los datos de PH
```
# PH floor distribution before cleaning, NaN included.
data.loc[(data['property_type']=='PH')]['floor'].value_counts(dropna=False,sort=True)
```
Los datos de piso>7 los tomamos como inválidos y los ponemos en NaN.
```
# Invalidate PH floors above the upper IQR limit.
# NOTE(review): uses `iqr_values`, not the PH-specific `iqr_values_ph` — confirm.
data.loc[(data['property_type']=='PH')&(data['floor']>iqr_values[1]),'floor']=np.nan
data.loc[(data['property_type']=='PH')]['floor'].value_counts(dropna=False,sort=True)
```
Al resto de valores (los 5246 NaN) los paso por una búsqueda de piso (entre 0 y 3).
```
# Scan PH descriptions for ground-floor through third-floor keywords and
# assign the corresponding floor number (0..3) to rows still missing one.
patterns_pb = ['planta baja','PB','P.B.']
acc_1 = ['primer piso','1er piso','1° piso']
acc_2 = ['segundo piso','2do piso','2° piso']
acc_3 = ['tercer piso','3er piso','3° piso']
accepted = [patterns_pb,acc_1,acc_2,acc_3]
piso = 0.0
for acc in accepted:
    for text in acc:
        ph_piso = data.loc[(data['property_type']=='PH')&(data['floor'].isna()),'description']\
        .str.contains(text,case=False,regex=False)
        data.loc[(data['property_type']=='PH')&(ph_piso),'floor']=piso
    piso+=1
data.loc[(data['property_type']=='PH')]['floor'].value_counts(dropna=False,sort=True)
```
Al resto de NaN los pongo en cero.
```
# Remaining PH rows with no floor default to ground level (0).
data.loc[(data['property_type']=='PH')&(data['floor'].isna()),'floor']=0
#data.loc[(data['property_type']=='PH')&(data['floor'].isna()),'floor']
data.loc[(data['property_type']=='PH')]['floor'].value_counts(dropna=False,sort=True)
```
## Análisis de 'floor' para store
```
# Distribution of floor values reported for stores (NaN included).
#np.sort(data.loc[data['property_type']=='store']['floor'].unique())
data.loc[data['property_type']=='store']['floor'].value_counts(dropna=False, sort=True)
```
Luego de una inspección de los datos y de algunas descripciones, se decide que hay valores válidos hasta el piso 10 (oficinas). Los valores mayores se toman como inválidos (mal cargados) y seguirán el mismo camino que en el análisis de la descripción de las categorías 'house' y 'PH'.
```
# Tukey fences for store floors (non-null rows only).
iqr_values_store=calculate_iqr_values(data.loc[(data['floor'].notnull())&(data['property_type']=='store'),'floor'])
# Bug fix: the outlier check previously received the stale generic
# `iqr_values` instead of the store-specific fences computed above.
data.loc[(data['property_type']=='store')&(data['floor'].notnull()),'floor'].apply(check_outliers,args=iqr_values_store).unique()
```
## Corrección de los datos de store
Luego de revisar algunas descripciones, se encontró que los valores de piso válidos encontrados son hasta el piso 10. Hay oficinas clasificadas como store. Haciendo un filtro por outliers, los datos válidos son hasta el piso 2. Tomamos como despreciable los datos perdidos.
```
# Distribution before cleaning.
data.loc[(data['property_type']=='store')]['floor'].value_counts(dropna=False,sort=True)
# Floors above the store-specific upper fence are considered mis-entered and
# reset to ground floor.
data.loc[(data['property_type']=='store') & (data['floor']>iqr_values_store[1]),'floor']=0
data.loc[(data['property_type']=='store')]['floor'].value_counts(dropna=False,sort=True)
# 'clean_fl' accumulates known/recovered floors; 'temp_fl' is per-pass scratch.
data['clean_fl']=data['floor']
data['temp_fl']=np.nan
func_lst = [text_floor_search,text_search_floor,num_floor_search,num_search_floor]
for func in func_lst:
    clean_temp_col(data,'temp_fl')
    # Bug fix: search only rows still missing a value in 'clean_fl' (the
    # accumulator), not in 'floor'. Searching on 'floor' re-matched rows
    # already recovered by a previous pass, and the unconditional .add()
    # then summed the hits of several passes for the same row.
    data.loc[(data['property_type']=='store')&(data['clean_fl'].isna()),'temp_fl']=data.loc[(data['property_type']=='store')&(data['clean_fl'].isna())]['description'].apply(func)
    data.loc[data['property_type']=='store','temp_fl'] = data.loc[data['property_type']=='store','temp_fl'].apply(check_outliers,args=iqr_values_store)
    data.loc[(data['property_type']=='store')&(data['clean_fl'].isna()),'clean_fl']=data.loc[(data['property_type']=='store')&(data['clean_fl'].isna()),'clean_fl'].add(data.loc[data['property_type']=='store','temp_fl'],fill_value=0)
# Bug fix: copy recovered values back only where 'floor' is still NaN. The
# previous version added 'clean_fl' onto every store row, doubling every
# floor that was already known (floor + clean_fl where clean_fl == floor).
data.loc[(data['property_type']=='store')&(data['floor'].isna()),'floor'] = data.loc[(data['property_type']=='store')&(data['floor'].isna()),'clean_fl']
data.loc[(data['property_type']=='store')]['floor'].value_counts(dropna=False,sort=True)
```
El resto de los NaN los pongo a cero.
```
# Stores still missing a floor default to ground floor; verify no NaN remain.
remaining_store = (data['property_type'] == 'store') & (data['floor'].isna())
data.loc[remaining_store, 'floor'] = 0
data.loc[data['property_type'] == 'store']['floor'].value_counts(dropna=False, sort=True)
data['floor'].isna().sum()
```
## Análisis de 'floor' para apartment
Después de un análisis exploratorio y la lectura de algunas descripciones, se encontró que el piso válido más alto es el 40 (departamento en Puerto Madero). Los valores mayores se pondrán a NaN y se buscará el valor correcto en title y description.
```
# Distinct floor values reported for apartments (exploratory look).
np.sort(data.loc[data['property_type']=='apartment']['floor'].unique())
#data.loc[(data['property_type']=='apartment')&(data['floor']<=40)&(data['floor']>=10),['floor','description']].sample(10).values
```
Filtrando por outliers, me quedan estos valores:
```
# Tukey fences for apartment floors (non-null rows only).
iqr_values_apt=calculate_iqr_values(data.loc[(data['floor'].notnull())&(data['property_type']=='apartment'),'floor'])
# Bug fix: the outlier check previously received the stale generic
# `iqr_values` instead of the apartment fences computed above.
data.loc[(data['property_type']=='apartment')&(data['floor'].notnull()),'floor'].apply(check_outliers,args=iqr_values_apt).unique()
```
Pero se ha hecho un análisis donde se encontró departamentos hasta en piso 40. Por lo tanto, se tomará como valores válidos para estos datos hasta piso 40. Lo mismo se tomará para los demás valores buscados en la descripción de la propiedad.
```
# Apartments above floor 40 are treated as data-entry errors (the highest
# verified listing is on floor 40) and blanked for later recovery.
data.loc[(data['property_type']=='apartment')&(data['floor']>40),'floor']=np.nan
data.loc[(data['floor']<=15)|(data['floor'].isna()),'floor'].value_counts(dropna=False)
```
## Columnas temporales
```
# Working columns: 'clean_fl' accumulates recovered floors, 'temp_fl' is
# per-pass scratch space.
data['clean_fl'] = data['floor'].copy()
data['temp_fl'] = np.nan
```
## Búsqueda de piso para apartment
Itero con las columnas 'title' y 'description'
```
# Fill missing apartment floors by mining 'title' and 'description' with the
# four regex searchers; accepted results are limited to floors 0-40.
#print(apartment_df.columns)
data['description'] = data['description'].fillna("-")
fl_func_lst = [text_floor_search,num_floor_search,text_search_floor,num_search_floor]
col_lst = ['title','description']
for col in col_lst:
    print("------------------------")
    print("Columna: {}".format(col))
    print("------------------------")
    for func in fl_func_lst:
        # Reset scratch, search only rows still missing a value, range-check,
        # then fold the hits into the 'clean_fl' accumulator.
        clean_temp_col(data,'temp_fl')
        data.loc[data['clean_fl'].isna(),'temp_fl'] = data[data['clean_fl'].isna()][col].apply(func)
        # NOTE(review): the search runs on *every* row with a missing
        # 'clean_fl', but only apartment rows are range-checked here —
        # confirm all non-apartment floors are already filled at this point.
        data.loc[data['property_type']=='apartment','temp_fl'] = data.loc[data['property_type']=='apartment','temp_fl'].apply(check_outliers,args=(0,40))
        data.loc[data['clean_fl'].isna(),'clean_fl'] = data['clean_fl'].add(data['temp_fl'],fill_value=0)
        print(func.__name__)
        new_found(data,'temp_fl','clean_fl')
# Copy recovered floors back where 'floor' is still missing.
data.loc[data['floor'].isna(),'floor']=data.loc[data['floor'].isna(),'clean_fl']
data['floor'].isna().sum()
```
### Imputar valores faltantes por mediana por place.
```
# Impute the remaining missing floors with the median floor of the same
# neighbourhood, computed over apartments only.
apartment_df = data.groupby('property_type').get_group('apartment')
floor_place_dict = apartment_df[['place_name','floor']].groupby('place_name').aggregate(np.median).to_dict()['floor']
floor_place_dict
for place, median_floor in floor_place_dict.items():
    data.loc[(data['place_name']==place)&(data['floor'].isna()),'floor'] = median_floor
#data.loc[(data['floor']<=15)|(data['floor'].isna()),'floor'].value_counts(dropna=False)
```
## Elimino columnas temporales
```
# The scratch columns are no longer needed; review the remaining NaN counts.
data.drop(columns=['clean_fl', 'temp_fl'], inplace=True)
data.isna().sum()
```
# Valores Faltantes de 'rooms'
### Funciones para la búsqueda de ambientes con RegEx
Defino algunas funciones con algunos patrones de RegEx para buscar el número de ambientes por dígitos o por texto.
```
def mono_amb_search(text):
    '''Return 1 when the text mentions a studio ("monoambiente"), NaN otherwise.

    The pattern tolerates an optional space and a stray leading 'h' before
    "amb", which are common typos in the listings.
    '''
    pattern_1amb = r'mono\s?h?amb\.?\w+'
    studio_re = re.compile(pattern_1amb, flags=re.IGNORECASE)
    if studio_re.search(text):
        return 1
    return np.nan
def more_amb_search(text):
    '''Extract a room count written with digits (e.g. "3 ambientes").

    With several matches, the maximum valid count wins (multi-unit listings).
    Returns NaN when nothing valid is found or the count is zero. The
    suffix check is a substring test against the joined `accepted` string,
    kept as-is from the original implementation.
    '''
    accepted_lst = ['ambientes','ambientes','amb.','amvientes','anvientes','hambientes',
    'hamvientes','hanvientes','ambintes','ambtes','ambiemtes','ambietes',
    'ambieintes','ambeintes','ambentes','ambs','ambietnes','ambienes',
    'ambientessan','ambc']
    accepted = ';'.join(accepted_lst)
    pattern_amb = r'(\d+)\s*(h?(?:amb|anv|amv)\.?\w*)'
    matches = re.compile(pattern_amb, flags=re.IGNORECASE).findall(text)
    if not matches:
        return np.nan
    if len(matches) > 1:
        valid = [float(num) for num, suffix in matches if suffix.lower() in accepted]
        if not valid:
            return np.nan
        best = max(valid)
        return np.nan if best == 0 else best
    # Exactly one match.
    num, suffix = matches[0]
    if suffix.lower() in accepted:
        value = float(num)
        return np.nan if value == 0 else value
    # Implicit None for an unrecognized suffix (parity with the original).
def text_amb_search(text):
    '''Extract a room count written in words ("tres ambientes" -> 3.0).

    Words are mapped through a Spanish number dictionary (1-15). With several
    matches the maximum valid count wins; NaN when nothing valid is found.
    The suffix check is a substring test against the joined `accepted`
    string, kept as-is from the original implementation.
    '''
    convert_dict = {
    'un': 1.0,'uno': 1.0,'dos': 2.0,'tres': 3.0,'cuatro': 4.0,'cinco': 5.0,
    'seis': 6.0,'siete': 7.0,'ocho': 8.0,'nueve': 9.0,'diez': 10.0,'once': 11.0,
    'doce': 12.0,'trece': 13.0,'catorce': 14.0,'quince': 15.0
    }
    accepted_lst = ['ambientes','ambientes','amb.','amvientes','anvientes','hambientes',
    'hamvientes','hanvientes','ambintes','ambtes','ambiemtes','ambietes',
    'ambieintes','ambeintes','ambentes','ambs','ambietnes','ambienes',
    'ambientessan','ambc']
    accepted = ';'.join(accepted_lst)
    pattern_amb = r'([a-z]{2,7})\s*(h?(?:amb|anv|amv)\.?\w*)'
    found = re.compile(pattern_amb, flags=re.IGNORECASE).findall(text)
    if not found:
        return np.nan
    if len(found) == 1:
        word, suffix = found[0]
        if suffix.lower() not in accepted or word.lower() not in convert_dict:
            return np.nan
        value = convert_dict[word.lower()]
        return np.nan if value == 0 else value
    candidates = [convert_dict[word.lower()] for word, suffix in found
                  if suffix.lower() in accepted and word.lower() in convert_dict]
    if not candidates:
        return np.nan
    best = max(candidates)
    return np.nan if best == 0 else best
def dorm_search(text):
    '''Infer a room count from a bedroom count written with digits.

    Matches patterns like "3 dormitorios" / "2 habitaciones" and returns the
    bedroom count plus one (for the living area) as a float. With several
    matches the maximum bedroom count is used. Returns NaN when nothing is
    found or the result is implausibly large (> 20 rooms).
    '''
    pattern_dorm = r'([0-9]+)\s*(?:dorm|habit|habiat|amplios dorm|buenos dorm)\.?(?:itorio|cion|acion|ación)?(?:s|es)?'
    regex_dorm = re.compile(pattern_dorm, flags = re.IGNORECASE)
    dorms = regex_dorm.findall(text)
    if not dorms:
        return np.nan
    # Bug fix: the multi-match branch previously returned max() *without* the
    # bedrooms->rooms +1 and without the sanity cap, inconsistent with the
    # single-match branch and with dorm_search2.
    amb = max(float(item) for item in dorms) + 1
    if amb <= 20:
        return amb
    return np.nan
def dorm_search2(text):
    '''Room count from "dormitorios: N" style text (the number follows the word).

    Returns bedrooms + 1 as float, or NaN when nothing matches. A single
    match above 20 rooms is discarded as implausible; the multi-match branch
    applies no cap (kept as in the original).
    '''
    pattern_dorm = r'(?:dorm|habit|habiat)\.?(?:itorio|cion|acion|ación)?(?:s|es)?:?\s*([0-9]+)'
    hits = re.compile(pattern_dorm, flags=re.IGNORECASE).findall(text)
    if not hits:
        return np.nan
    if len(hits) == 1:
        rooms = float(hits[0]) + 1
        return rooms if rooms <= 20 else np.nan
    return max(float(h) for h in hits) + 1
def text_dorm_search(text):
    '''Room count from a bedroom count written in words ("dos dormitorios").

    A single match is converted to rooms (bedrooms + 1, capped at 20). With
    several matches the maximum word-number is returned as-is, mirroring the
    original behaviour. NaN when nothing recognizable is found.
    '''
    convert_dict = {
    'un': 1.0,'uno': 1.0,'dos': 2.0,'tres': 3.0,'cuatro': 4.0,
    'cinco': 5.0,'seis': 6.0,'siete': 7.0,'ocho': 8.0,'nueve': 9.0,
    'diez': 10.0,'once': 11.0,'doce': 12.0,'trece': 13.0,
    'catorce': 14.0,'quince': 15.0
    }
    pattern_dorm = r'([a-z]{2,7})\s*(?:dorm|habit|amplios dorm)\.?(?:itorio|acion|ación)?(?:s|es)?'
    hits = re.compile(pattern_dorm, flags=re.IGNORECASE).findall(text)
    if not hits:
        return np.nan
    if len(hits) > 1:
        converted = [convert_dict[h.lower()] for h in hits if h.lower() in convert_dict]
        return max(converted) if converted else np.nan
    token = hits[0].lower()
    if token not in convert_dict:
        return np.nan
    rooms = convert_dict[token] + 1.0
    return rooms if rooms <= 20 else np.nan
def clean_temp_col(df,tempCol):
    """Reset a scratch column: overwrite every row of *tempCol* with NaN.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame that owns the scratch column.
    tempCol : str
        Name of the column to blank out.
    """
    df.loc[:, tempCol] = np.nan
def new_found(df,tempCol,cleanCol):
    """Print how many values the last search pass found and the running total.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame holding both columns.
    tempCol : str
        Scratch column with this pass's hits.
    cleanCol : str
        Accumulator column with every value recovered so far.
    """
    found_now = df[tempCol].notnull().sum()
    total = df[cleanCol].notnull().sum()
    print("Nuevos: {}.".format(found_now))
    print("Total: {}.".format(total))
import scipy.stats as stats
def calculate_iqr_values(array):
    """Return Tukey outlier fences (lower, upper) for *array*.

    Returns
    -------
    lim_inf : float
        q1 - 1.5 * iqr
    lim_sup : float
        q3 + 1.5 * iqr
    """
    q1, q3 = np.percentile(array, [25, 75])
    spread = stats.iqr(array)  # equals q3 - q1 under the default settings
    return q1 - 1.5 * spread, q3 + 1.5 * spread
def check_outliers(x,min_lim,max_lim):
    """Return *x* when it lies inside [min_lim, max_lim], NaN otherwise."""
    if min_lim <= x <= max_lim:
        return x
    return np.nan
# Class balance check, then boxplot of room counts per property type.
data['property_type'].value_counts(dropna=False)
plt.figure(figsize=(8, 8))
plt.grid(True)
ax = sns.boxplot(x="property_type", y="rooms", data=data, palette="Set1")
ax.set_title('Distribución de ambientes por tipo de propiedad')
ax.set_xlabel('Tipo de Propiedad')
ax.set_ylabel('N° de Ambientes')
```
## Verificar outliers para house
```
# House room counts: drop values above the Tukey upper fence as mis-entered.
data.loc[data['property_type']=='house','rooms'].value_counts(dropna=False)
iqr_values_house=calculate_iqr_values(data.loc[(data['rooms'].notnull())&(data['property_type']=='house'),'rooms'])
print(data.loc[(data['property_type']=='house')&(data['rooms'].notnull()),'rooms'].apply(check_outliers,args=iqr_values_house).unique())
print(iqr_values_house)
#data.loc[(data['property_type']=='house')&(data['rooms']>iqr_values_house[1]),['rooms','description']].sample(10).values
data.loc[(data['property_type']=='house')&(data['rooms']>iqr_values_house[1]),'rooms']=np.nan
data.loc[data['property_type']=='house','rooms'].value_counts(dropna=False)
```
## Búsqueda de ambientes para house
```
# Recover missing house room counts from the description via the regex
# searchers; results outside (1, upper fence) are discarded.
data['clean_amb']=data['rooms']
data['temp_amb']=np.nan
func_lst = [mono_amb_search,more_amb_search,text_amb_search,dorm_search,text_dorm_search,dorm_search2]
for func in func_lst:
    # print(func.__name__)
    # print(data.loc[(data['property_type']=='store')&(data['floor'].isna())]['description'].apply(func).value_counts()
    # )
    clean_temp_col(data,'temp_amb')
    data.loc[(data['property_type']=='house')&(data['rooms'].isna()),'temp_amb']=data.loc[(data['property_type']=='house')&(data['rooms'].isna())]['description'].apply(func)
    data.loc[data['property_type']=='house','temp_amb'] = data.loc[data['property_type']=='house','temp_amb'].apply(check_outliers,args=(1,iqr_values_house[1]))
    data.loc[(data['property_type']=='house')&(data['clean_amb'].isna()),'clean_amb']=data.loc[(data['property_type']=='house')&(data['clean_amb'].isna()),'clean_amb'].add(data.loc[data['property_type']=='house','temp_amb'],fill_value=0)
# Write the accumulator back where 'rooms' is still missing; .add with
# fill_value=0 aligns 'clean_amb' onto the NaN rows.
data.loc[(data['property_type']=='house')&(data['rooms'].isna()),'rooms'] = data[(data['property_type']=='house')&(data['rooms'].isna())]['rooms'].add(data[data['property_type']=='house']['clean_amb'],fill_value=0)
#data.loc[(data['property_type']=='store')&(data['clean_fl']).notnull()]
data.loc[(data['property_type']=='house')]['rooms'].value_counts(dropna=False,sort=True)
```
## Verificar outliers para PH
```
# PH room counts: compute Tukey fences and inspect the flagged outliers.
data.loc[data['property_type']=='PH','rooms'].value_counts(dropna=False)
iqr_values_PH=calculate_iqr_values(data.loc[(data['rooms'].notnull())&(data['property_type']=='PH'),'rooms'])
print(data.loc[(data['property_type']=='PH')&(data['rooms'].notnull()),'rooms'].apply(check_outliers,args=iqr_values_PH).unique())
print(iqr_values_PH)
```
Luego de revisar los outliers de los ambientes de los PH y sus descripciones, se notó que todos salvo el de 20 son correctos.
```
# NOTE(review): the prose above says only the 20-room PH is wrong, but this
# drops *every* count above the upper fence — confirm intent.
#data.loc[(data['property_type']=='house')&(data['rooms']>iqr_values_house[1]),['rooms','description']].sample(10).values
data.loc[(data['property_type']=='PH')&(data['rooms']>iqr_values_PH[1]),'rooms']=np.nan
data.loc[data['property_type']=='PH','rooms'].value_counts(dropna=False)
```
## Búsqueda de ambientes para PH
```
# Recover missing PH room counts from the description; hits outside the PH
# fences are discarded.
data['clean_amb']=data['rooms']
data['temp_amb']=np.nan
func_lst = [mono_amb_search,more_amb_search,text_amb_search,dorm_search,text_dorm_search,dorm_search2]
for func in func_lst:
    # print(func.__name__)
    # print(data.loc[(data['property_type']=='store')&(data['floor'].isna())]['description'].apply(func).value_counts()
    # )
    clean_temp_col(data,'temp_amb')
    data.loc[(data['property_type']=='PH')&(data['rooms'].isna()),'temp_amb']=data.loc[(data['property_type']=='PH')&(data['rooms'].isna())]['description'].apply(func)
    data.loc[data['property_type']=='PH','temp_amb'] = data.loc[data['property_type']=='PH','temp_amb'].apply(check_outliers,args=iqr_values_PH)
    data.loc[(data['property_type']=='PH')&(data['clean_amb'].isna()),'clean_amb']=data.loc[(data['property_type']=='PH')&(data['clean_amb'].isna()),'clean_amb'].add(data.loc[data['property_type']=='PH','temp_amb'],fill_value=0)
# Write the accumulator back where 'rooms' is still missing.
data.loc[(data['property_type']=='PH')&(data['rooms'].isna()),'rooms'] = data[(data['property_type']=='PH')&(data['rooms'].isna())]['rooms'].add(data[data['property_type']=='PH']['clean_amb'],fill_value=0)
#data.loc[(data['property_type']=='store')&(data['clean_fl']).notnull()]
data.loc[(data['property_type']=='PH')]['rooms'].value_counts(dropna=False,sort=True)
#data.loc[(data['property_type']=='PH')&(data['rooms'].isna()),'description'].sample(10).values
```
## Verificar outliers para store
```
# Store room counts: Tukey fences, then drop counts above the upper fence.
# NOTE: this *reassigns* iqr_values_store (it previously held floor fences)
# with rooms-based fences.
data.loc[data['property_type']=='store','rooms'].value_counts(dropna=False)
iqr_values_store=calculate_iqr_values(data.loc[(data['rooms'].notnull())&(data['property_type']=='store'),'rooms'])
print(data.loc[(data['property_type']=='store')&(data['rooms'].notnull()),'rooms'].apply(check_outliers,args=iqr_values_store).unique())
print(iqr_values_store)
#data.loc[(data['property_type']=='store')&(data['rooms']>iqr_values_store[1]),['rooms','description']].values
data.loc[(data['property_type']=='store')&(data['rooms']>iqr_values_store[1]),'rooms']=np.nan
data.loc[data['property_type']=='store','rooms'].value_counts(dropna=False)
```
## Búsqueda de ambientes para store
```
# Recover missing store room counts from the description; hits outside the
# store fences are discarded.
data['clean_amb']=data['rooms']
data['temp_amb']=np.nan
func_lst = [mono_amb_search,more_amb_search,text_amb_search,dorm_search,text_dorm_search,dorm_search2]
for func in func_lst:
    clean_temp_col(data,'temp_amb')
    data.loc[(data['property_type']=='store')&(data['rooms'].isna()),'temp_amb']=data.loc[(data['property_type']=='store')&(data['rooms'].isna())]['description'].apply(func)
    data.loc[data['property_type']=='store','temp_amb'] = data.loc[data['property_type']=='store','temp_amb'].apply(check_outliers,args=iqr_values_store)
    data.loc[(data['property_type']=='store')&(data['clean_amb'].isna()),'clean_amb']=data.loc[(data['property_type']=='store')&(data['clean_amb'].isna()),'clean_amb'].add(data.loc[data['property_type']=='store','temp_amb'],fill_value=0)
# Bug fix: the write-back previously added the *PH* accumulator (copy-paste
# from the PH cell); its index never matches the store rows, so no recovered
# room count was ever written back for stores.
data.loc[(data['property_type']=='store')&(data['rooms'].isna()),'rooms'] = data[(data['property_type']=='store')&(data['rooms'].isna())]['rooms'].add(data[data['property_type']=='store']['clean_amb'],fill_value=0)
# Small stores (< 50 m2) with still-unknown rooms default to a single room.
data.loc[(data['property_type']=='store')&(data['rooms'].isna())&(data['surface_total_in_m2']<50),'rooms']=1
data.loc[(data['property_type']=='store')]['rooms'].value_counts(dropna=False,sort=True)
```
## Verificar outliers para apartment
```
# Apartment room counts: Tukey fences and a look at the flagged outliers.
data.loc[data['property_type']=='apartment','rooms'].value_counts(dropna=False)
iqr_values_apartment=calculate_iqr_values(data.loc[(data['rooms'].notnull())&(data['property_type']=='apartment'),'rooms'])
print(data.loc[(data['property_type']=='apartment')&(data['rooms'].notnull()),'rooms'].apply(check_outliers,args=iqr_values_apartment).unique())
print(iqr_values_apartment)
#data.loc[(data['property_type']=='apartment')&(data['rooms']==iqr_values_apartment[1]+3),['rooms','description']].sample(10).values
```
Luego de revisar los outliers de los ambientes de los apartment y sus descripciones, se decidió tomar como válidos a todos los que aparecen hasta 7 ambientes. Esto es debido a que existe una buena cantidad de publicaciones donde se venden varias propiedades juntas o departamentos de varios pisos.
```
# Manual cap: counts above 7 rooms are treated as invalid (listings up to 7
# rooms were verified as genuine — multi-unit/multi-floor properties).
#data.loc[(data['property_type']=='apartment')&(data['rooms']>iqr_values_apartment[1]),['rooms','description']].sample(10).values
data.loc[(data['property_type']=='apartment')&(data['rooms']>7),'rooms']=np.nan
data.loc[data['property_type']=='apartment','rooms'].value_counts(dropna=False)
```
## Búsqueda de ambientes para apartment
```
# Recover missing apartment room counts; the upper fence is relaxed by 2.5
# rooms because listings up to 7 rooms were verified as genuine.
print(iqr_values_apartment)
data['clean_amb']=data['rooms']
data['temp_amb']=np.nan
func_lst = [mono_amb_search,more_amb_search,text_amb_search,dorm_search,text_dorm_search,dorm_search2]
for func in func_lst:
    # print(func.__name__)
    # print(data.loc[(data['property_type']=='store')&(data['floor'].isna())]['description'].apply(func).value_counts()
    # )
    clean_temp_col(data,'temp_amb')
    data.loc[(data['property_type']=='apartment')&(data['rooms'].isna()),'temp_amb']=data.loc[(data['property_type']=='apartment')&(data['rooms'].isna())]['description'].apply(func)
    data.loc[data['property_type']=='apartment','temp_amb'] = data.loc[data['property_type']=='apartment','temp_amb'].apply(check_outliers,args=(iqr_values_apartment[0],iqr_values_apartment[1]+2.5))
    data.loc[(data['property_type']=='apartment')&(data['clean_amb'].isna()),'clean_amb']=data.loc[(data['property_type']=='apartment')&(data['clean_amb'].isna()),'clean_amb'].add(data.loc[data['property_type']=='apartment','temp_amb'],fill_value=0)
# Write the accumulator back where 'rooms' is still missing.
data.loc[(data['property_type']=='apartment')&(data['rooms'].isna()),'rooms'] = data[(data['property_type']=='apartment')&(data['rooms'].isna())]['rooms'].add(data[data['property_type']=='apartment']['clean_amb'],fill_value=0)
#data.loc[(data['property_type']=='store')&(data['clean_fl']).notnull()]
data.loc[(data['property_type']=='apartment')]['rooms'].value_counts(dropna=False,sort=True)
#data.loc[(data['property_type']=='PH')&(data['rooms'].isna()),'description'].sample(10).values
# Scratch columns no longer needed; re-plot the distributions after cleaning.
data.drop(['clean_amb','temp_amb'],axis=1,inplace=True)
data.isna().sum()
plt.figure(figsize=(8,8))
plt.grid(True)
ax = sns.boxplot(x="property_type", y="rooms", data=data, palette="Set1")
plt.title('Distribución de ambientes por tipo de propiedad')
plt.xlabel('Tipo de Propiedad')
plt.ylabel('N° de Ambientes')
```
# Variables Dummy: Amenities
Se busca crear variables dummy de características que sumen para la regresión del desafío 2.
```
# For each amenity build a 0/1/NaN dummy from the description: 0 when the
# text explicitly negates it ("sin ..."), 1 when it is mentioned positively,
# NaN when nothing is said.
def _amenity_dummy(neg_pattern, pos_pattern):
    """Return a 0/1/NaN Series for one amenity over data['description']."""
    neg = data.loc[:,'description'].str.contains(neg_pattern,flags=re.IGNORECASE,regex=True)
    neg = neg.apply(lambda x: 0 if x else np.nan)
    # Search the positive pattern only where no explicit negation was found;
    # otherwise "sin gimnasio" would also match "gimnasio" and yield 1.
    pos = data.loc[neg.isna(),'description'].str.contains(pos_pattern,flags=re.IGNORECASE,regex=True).astype(float)
    return pos.add(neg,fill_value=0)

# Bug fix: the original added `sin_gimnasio` into SUM, laundry, balcon and
# patio (copy-paste), and only 'cochera' excluded negated rows from the
# positive search. Every amenity now uses its own negation pattern.
amenities = {
    'cochera': _amenity_dummy(r"(?:sin cochera|sin estacionamiento|sin garage)", r"(?:cochera|estacionamiento|garage)"),
    'gimnasio': _amenity_dummy(r"(?:sin gym|sin gimnasio|sin gimnasia)", r"(?:gym|gimnasio|gimnasia)"),
    'piscina': _amenity_dummy(r"(?:sin piscina|sin pisicina|sin pileta)", r"(?:piscina|pisicina|pileta)"),
    'SUM': _amenity_dummy(r"(?:sin SUM|sin salón|sin salon|sin playroom)", r"(?:SUM|salón|salon|playroom)"),
    'laundry': _amenity_dummy(r"(?:sin laundy|sin laundry|sin lavadero)", r"(?:laundy|laundry|lavadero)"),
    'balcon': _amenity_dummy(r"(?:sin balcon|sin balcón)", r"(?:balcon|balcón)"),
    'patio': _amenity_dummy(r"(?:sin patio|sin jardín|sin jardin)", r"(?:patio|jardín|jardin)"),
}
for key,item in amenities.items():
    data[key]=item
data.columns
data.isna().sum()
```
# Limpieza de Superficie (Total y Cubierta)
De las inspecciones de las columnas "surface_total_in_m2" y "surface_covered_in_m2", vemos que los valores comienzan desde 0.0; por ende, consideraremos superficies válidas aquellas que igualen o superen los diez metros cuadrados.
```
# Surfaces below 10 m2 are considered invalid and blanked for later recovery.
min_sup_valida = 10
data.loc[data['surface_total_in_m2']<min_sup_valida,'surface_total_in_m2']=np.nan
# Bug fix: the covered-surface check previously blanked
# 'surface_total_in_m2' again instead of 'surface_covered_in_m2'.
data.loc[data['surface_covered_in_m2']<min_sup_valida,'surface_covered_in_m2']=np.nan
```
La mediana de las superficies por la cantidad de ambientes. En los casos de 1 y 2 ambientes parece lógico.
```
# Median surfaces per room count (sanity check) and fresh per-type copies
# for plotting.
apartment_df = data.groupby('property_type').get_group('apartment')
apartment_df[['rooms','surface_total_in_m2','surface_covered_in_m2']].groupby('rooms').median()
data.loc[(data['property_type']=='apartment'),'surface_total_in_m2'].isna().sum()
apartment_df = data.loc[data['property_type']=='apartment'].copy()
house_df = data.loc[data['property_type']=='house'].copy()
ph_df = data.loc[data['property_type']=='PH'].copy()
store_df = data.loc[data['property_type']=='store'].copy()
# 2x2 grid of covered-surface-by-rooms boxplots, one panel per property type.
plt.figure(figsize=(14,10))
panels = [
    (221, apartment_df, 'Superficie por número de ambientes (departamentos)'),
    (222, house_df, 'Superficie por número de ambientes (Casa)'),
    (223, ph_df, 'Superficie por número de ambientes (PH)'),
    (224, store_df, 'Superficie por número de ambientes (Store)'),
]
for position, frame, title in panels:
    plt.subplot(position)
    plt.grid(True)
    ax = sns.boxplot(x="rooms", y="surface_covered_in_m2", data=frame, palette="Set1")
    plt.title(title)
    plt.xlabel('Número de Ambientes')
    plt.ylabel('Superficie [m2]')
plt.savefig("../Images/Rooms-Before.png")
```
## Limpiando Superficies
### apartment
```
# Per-room-count surface limits for apartments: lower bound 10 m2, upper
# bound = Q3 + 1.5*IQR computed over listings with exactly i rooms.
sup_tot_limits_apt = []
sup_cub_limits_apt = []
for i in range(1,8,1):
    a=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='apartment')&(data['surface_total_in_m2'].notnull()),'surface_total_in_m2'])[1]
    sup_tot_limits_apt.append((10,a))
    b=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='apartment')&(data['surface_covered_in_m2'].notnull()),'surface_covered_in_m2'])[1]
    sup_cub_limits_apt.append((10,b))
print(sup_tot_limits_apt)
print(sup_cub_limits_apt)
# Bug fix: each limit is specific to one room count, so it must only be
# applied to rows with that many rooms. The original applied every limit to
# *all* apartments, which effectively enforced the tightest (1-room) upper
# bound on every listing.
for i in range(7):
    mask = (data['property_type']=='apartment')&(data['rooms']==i+1)
    data.loc[mask&(data['surface_total_in_m2']>sup_tot_limits_apt[i][1]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']>sup_cub_limits_apt[i][1]),'surface_covered_in_m2']=np.nan
    data.loc[mask&(data['surface_total_in_m2']<sup_tot_limits_apt[i][0]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']<sup_cub_limits_apt[i][0]),'surface_covered_in_m2']=np.nan
apartment_df[['rooms','surface_total_in_m2','surface_covered_in_m2']].groupby('rooms').median()
```
### house
```
# Per-room-count surface limits for houses (see the apartment cell).
sup_tot_limits_hs = []
sup_cub_limits_hs = []
for i in range(1,8,1):
    a=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='house')&(data['surface_total_in_m2'].notnull()),'surface_total_in_m2'])[1]
    sup_tot_limits_hs.append((10,a))
    b=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='house')&(data['surface_covered_in_m2'].notnull()),'surface_covered_in_m2'])[1]
    sup_cub_limits_hs.append((10,b))
print(sup_tot_limits_hs)
print(sup_cub_limits_hs)
# Bug fix: apply each per-room-count limit only to rows with that room
# count; the original applied every limit to all houses.
for i in range(7):
    mask = (data['property_type']=='house')&(data['rooms']==i+1)
    data.loc[mask&(data['surface_total_in_m2']>sup_tot_limits_hs[i][1]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']>sup_cub_limits_hs[i][1]),'surface_covered_in_m2']=np.nan
    data.loc[mask&(data['surface_total_in_m2']<sup_tot_limits_hs[i][0]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']<sup_cub_limits_hs[i][0]),'surface_covered_in_m2']=np.nan
#apartment_df[['rooms','surface_total_in_m2','surface_covered_in_m2']].groupby('rooms').median()
```
### PH
```
# Per-room-count surface limits for PH (rooms 1-5; see the apartment cell).
sup_tot_limits_ph = []
sup_cub_limits_ph = []
for i in range(1,6,1):
    a=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='PH')&(data['surface_total_in_m2'].notnull()),'surface_total_in_m2'])[1]
    sup_tot_limits_ph.append((10,a))
    b=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='PH')&(data['surface_covered_in_m2'].notnull()),'surface_covered_in_m2'])[1]
    sup_cub_limits_ph.append((10,b))
print(sup_tot_limits_ph)
print(sup_cub_limits_ph)
# Bug fix: apply each per-room-count limit only to rows with that room
# count; the original applied every limit to all PH rows.
for i in range(5):
    mask = (data['property_type']=='PH')&(data['rooms']==i+1)
    data.loc[mask&(data['surface_total_in_m2']>sup_tot_limits_ph[i][1]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']>sup_cub_limits_ph[i][1]),'surface_covered_in_m2']=np.nan
    data.loc[mask&(data['surface_total_in_m2']<sup_tot_limits_ph[i][0]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']<sup_cub_limits_ph[i][0]),'surface_covered_in_m2']=np.nan
#apartment_df[['rooms','surface_total_in_m2','surface_covered_in_m2']].groupby('rooms').median()
```
### store
```
# Per-room-count surface limits for stores (rooms 1-4; see the apartment cell).
sup_tot_limits_str = []
sup_cub_limits_str = []
for i in range(1,5,1):
    a=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='store')&(data['surface_total_in_m2'].notnull()),'surface_total_in_m2'])[1]
    sup_tot_limits_str.append((10,a))
    b=calculate_iqr_values(data.loc[(data['rooms']==i)&(data['property_type']=='store')&(data['surface_covered_in_m2'].notnull()),'surface_covered_in_m2'])[1]
    sup_cub_limits_str.append((10,b))
print(sup_tot_limits_str)
print(sup_cub_limits_str)
# Bug fix: apply each per-room-count limit only to rows with that room
# count; the original applied every limit to all stores.
for i in range(4):
    mask = (data['property_type']=='store')&(data['rooms']==i+1)
    data.loc[mask&(data['surface_total_in_m2']>sup_tot_limits_str[i][1]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']>sup_cub_limits_str[i][1]),'surface_covered_in_m2']=np.nan
    data.loc[mask&(data['surface_total_in_m2']<sup_tot_limits_str[i][0]),'surface_total_in_m2']=np.nan
    data.loc[mask&(data['surface_covered_in_m2']<sup_cub_limits_str[i][0]),'surface_covered_in_m2']=np.nan
#apartment_df[['rooms','surface_total_in_m2','surface_covered_in_m2']].groupby('rooms').median()
```
Me armo un dataframe con los datos de apartment y en cada fila la superficie segun número de ambientes.
```
# Rebuild the per-type frames after cleaning and redraw the 2x2 boxplot grid.
apartment_df = data.loc[data['property_type']=='apartment'].copy()
house_df = data.loc[data['property_type']=='house'].copy()
ph_df = data.loc[data['property_type']=='PH'].copy()
store_df = data.loc[data['property_type']=='store'].copy()
plt.figure(figsize=(14,10))
panels = [
    (221, apartment_df, 'Superficie por número de ambientes (departamentos)'),
    (222, house_df, 'Superficie por número de ambientes (Casa)'),
    (223, ph_df, 'Superficie por número de ambientes (PH)'),
    (224, store_df, 'Superficie por número de ambientes (Store)'),
]
for position, frame, title in panels:
    plt.subplot(position)
    plt.grid(True)
    ax = sns.boxplot(x="rooms", y="surface_covered_in_m2", data=frame, palette="Set1")
    plt.title(title)
    plt.xlabel('Número de Ambientes')
    plt.ylabel('Superficie [m2]')
plt.savefig("../Images/Rooms-After.png")
```
## Funciones para búsqueda de superficie
```
def sup_total_search(text):
    """Extract the total surface (m2) from a listing title/description.

    Looks for a "total surface" keyword (e.g. "lote de", "sup. total")
    followed by a number and a squared-meters unit. Returns the value as a
    number, or np.nan when there is no match, more than one (ambiguous)
    match, or the value is an implausible surface (< 10 m2).
    """
    # FIX: the character class was [,|\.], which accepted a literal "|" as a
    # decimal separator; a capture like "12|34" then crashed pd.to_numeric.
    pattern = r'(?:lote de|sup\.\s?total|superficie total|sup\.\s?tot\.)\s?:?\s*(\d+[,.]?\d*)\s?(?:m2|mts2|mtrs2|metros\s?cuadrados|m²)'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    sup_tot_lst = regex.findall(text)
    # Only an unambiguous, single match is trusted.
    if len(sup_tot_lst) != 1:
        return np.nan
    sup = pd.to_numeric(sup_tot_lst[0].replace(',', '.'))
    if sup < 10:  # too small to be a real total surface
        return np.nan
    return sup
def sup_total_search2(text):
    """Fallback extraction of total surface (m2): any number followed by a
    squared-meters unit, with no keyword prefix.

    Returns the value, or np.nan when there is no match, several (ambiguous)
    matches, or the value is < 10 m2 — the same sanity filter the other
    surface-search helpers apply.
    """
    # FIX: character class was [,|\.] (literal "|" accepted as a decimal
    # separator, which could crash pd.to_numeric downstream).
    pattern = r'(\d+[,.]?\d*)\s?(?:m2|mts2|mtrs2|metros\s?cuadrados|m²)'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    sup_tot_lst = regex.findall(text)
    # FIX: the original multi-match branch always returned nan before its
    # "sup_lst < 10" test, which compared a LIST against an int (TypeError
    # had it ever run); and the single-match branch skipped the < 10 filter.
    if len(sup_tot_lst) != 1:
        return np.nan
    sup = pd.to_numeric(sup_tot_lst[0].replace(',', '.'))
    if sup < 10:  # sanity filter, consistent with sup_total_search
        return np.nan
    return sup
def sup_cub_search(text):
    """Extract the covered surface (m2) after a "covered surface" keyword
    (e.g. "sup. cub.:", "superficie cubierta").

    The unit suffix is optional. Returns the value, or np.nan when there is
    no match, several (ambiguous) matches, or the value is < 10 m2.
    """
    # FIX: the unit group contained an empty alternative ("...|mts\.?||m²"),
    # which silently made the whole unit optional; that intent is now
    # explicit with a trailing "?". Matching behavior is unchanged.
    pattern = r'(?:superficie\s?cubierta:?|sup\.?\s?cub.?:?|sub\.?\s?cubierta:?|metros.\s?cubiertos:?|cub\.?)\s?:?\s?(\d+,?\.?\d*)\s?(?:m2|mt2|mtrs2|mtrs\.?|mts\.?|m²)?'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    sup_cub_lst = regex.findall(text)
    if not sup_cub_lst:
        return np.nan
    sup_cub_lst = [item.replace(',', '.') for item in sup_cub_lst]
    if len(sup_cub_lst) != 1:
        # Ambiguous: more than one candidate value.
        return np.nan
    sup = pd.to_numeric(sup_cub_lst[0])
    return np.nan if sup < 10 else sup
def sup_cub_search2(text):
    """Fallback extraction of covered surface: a number, an optional
    m2-style unit, then a "built/covered" keyword (e.g. "120 m2 cubiertos",
    "120 construidos").

    Returns the value, or np.nan when there is no match, several (ambiguous)
    matches, or the value is < 10 m2.
    """
    # FIX: the unit group ended in an empty alternative ("...de|)"), which
    # silently made it optional; that intent is now explicit with a trailing
    # "?". Matching behavior is unchanged.
    pattern = r'(\d+,?\.?\d*)\s?(?:m2|mtrs2|mtrs\.?|mts\.?|m²|metros|metros\s?cuadrados|metros\s?cuadrados\s?de)?,?\s?(?:construidos|cubiertos|cub\.?)'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    sup_cub_lst = regex.findall(text)
    if not sup_cub_lst:
        return np.nan
    sup_cub_lst = [item.replace(',', '.') for item in sup_cub_lst]
    if len(sup_cub_lst) != 1:
        # Ambiguous: more than one candidate value.
        return np.nan
    sup = pd.to_numeric(sup_cub_lst[0])
    return np.nan if sup < 10 else sup
```
## Columnas temporales
Columnas temporales:
temp_amb: columna para obtener el resultado del apply de alguna función con RegEx. Puede usarse sobre 'title' o 'description'. empieza en np.nan
clean_amb: columna para ir acumulando los resultados de cada función con RegEx. Puede usarse sobre 'title' o 'description'. empieza inicializada con los valores originales de 'rooms'.
```
#Columnas temporales para superficie cubierta
# 'clean_*' accumulates recovered values (seeded with the current column);
# 'temp_*' holds each regex pass's extraction before it is merged in.
data['clean_sup_cub']=data['surface_covered_in_m2']
data['temp_sup_cub']=np.nan
#Columnas temporales para superficie total
data['clean_sup_tot']=data['surface_total_in_m2']
data['temp_sup_tot']=np.nan
```
## Búsqueda de superficie cubierta
```
#data['description'] = data['description'].fillna("-")
# For each text column and each regex helper: extract covered-surface values
# only for rows still missing one, then merge them into 'clean_sup_cub'.
# clean_temp_col / new_found are notebook helpers defined earlier in the file.
sup_cub_func_lst = [sup_cub_search,sup_cub_search2]
col_lst = ['title','description']
for col in col_lst:
    print("------------------------")
    print("Columna: {}".format(col))
    print("------------------------")
    for func in sup_cub_func_lst:
        clean_temp_col(data,'temp_sup_cub')
        # Search only rows with no value yet.
        data.loc[data['clean_sup_cub'].isna(),'temp_sup_cub'] = data[data['clean_sup_cub'].isna()][col].apply(func)
        # add(..., fill_value=0) folds the newly found values into the accumulator.
        data.loc[data['clean_sup_cub'].isna(),'clean_sup_cub'] = data['clean_sup_cub'].add(data['temp_sup_cub'],fill_value=0)
        print(func.__name__)
        new_found(data,'temp_sup_cub','clean_sup_cub')
```
## Búsqueda de superficie total
```
# Same recovery scheme as for covered surface, now for total surface:
# keyword-prefixed pattern first, then the bare number+unit fallback.
sup_tot_func_lst = [sup_total_search,sup_total_search2]
col_lst = ['title','description']
for col in col_lst:
    print("------------------------")
    print("Columna: {}".format(col))
    print("------------------------")
    for func in sup_tot_func_lst:
        clean_temp_col(data,'temp_sup_tot')
        # Search only rows with no value yet.
        data.loc[data['clean_sup_tot'].isna(),'temp_sup_tot'] = data[data['clean_sup_tot'].isna()][col].apply(func)
        data.loc[data['clean_sup_tot'].isna(),'clean_sup_tot'] = data['clean_sup_tot'].add(data['temp_sup_tot'],fill_value=0)
        print(func.__name__)
        new_found(data,'temp_sup_tot','clean_sup_tot')
```
Filtrar los nuevos valores con los outliers de antes.
```
# Re-apply the per-property-type outlier limits to the recovered surfaces,
# then promote the cleaned columns back into place and drop the temporaries.
sup_tot_limits = [sup_tot_limits_apt,sup_tot_limits_hs,sup_tot_limits_ph,sup_tot_limits_str]
sup_cub_limits = [sup_cub_limits_apt,sup_cub_limits_hs,sup_cub_limits_ph,sup_cub_limits_str]
property_type = ('apartment','house','PH','store')
for tipo,limites_tot,limites_cub in zip(property_type,sup_tot_limits,sup_cub_limits):
    print(tipo,limites_tot,limites_cub)
data['surface_total_in_m2'] = data['clean_sup_tot']
data['surface_covered_in_m2'] = data['clean_sup_cub']
data.drop(['clean_sup_tot','temp_sup_tot','clean_sup_cub','temp_sup_cub'],axis=1,inplace=True)
# NOTE(review): as in the earlier store-only loop, the masks do not filter by
# rooms, so each row is tested against every limit pair of its type — confirm.
for tipo,limites_tot,limites_cub in zip(property_type,sup_tot_limits,sup_cub_limits):
    for i in range(len(limites_tot)):
        data.loc[(data['property_type']==str(tipo))&(data['surface_total_in_m2']>limites_tot[i][1]),'surface_total_in_m2']=np.nan
        data.loc[(data['property_type']==str(tipo))&(data['surface_covered_in_m2']>limites_cub[i][1]),'surface_covered_in_m2']=np.nan
        data.loc[(data['property_type']==str(tipo))&(data['surface_total_in_m2']<limites_tot[i][0]),'surface_total_in_m2']=np.nan
        data.loc[(data['property_type']==str(tipo))&(data['surface_covered_in_m2']<limites_cub[i][0]),'surface_covered_in_m2']=np.nan
#data.to_csv('../Data/falta_sup_price.csv')
```
## Imputación de superficies
Se calcula el índice de construcción por zona y por tipo de propiedad.
El índice se define como:
<center>$Indice\ de\ Construcción = \frac{Superficie Cubierta}{Superficie Total}$</center>
Luego, se calcula los valores faltantes de Superficie Cubierta multiplicando al índice por la Superficie Total.
Y, la Superficie Total como la Superficie Cubierta dividido el Indice
```
# Construction index = covered / total surface, averaged per
# (place_name, property_type); used to impute whichever surface is missing.
data['construction_index']=data['surface_covered_in_m2']/data['surface_total_in_m2']
place_const_index = data[['property_type','place_name','construction_index']].groupby(['place_name','property_type']).mean()
data.drop('construction_index',axis=1,inplace=True)
# Attach the per-group mean index back onto every row.
data = pd.merge(data,place_const_index,on=['place_name','property_type'],how='inner')
# Missing covered surface <- index * total surface.
falta_sup_cub_index = data.loc[data['construction_index'].notnull()&(data['surface_covered_in_m2'].isna())&(data['surface_total_in_m2'].notnull()),['surface_covered_in_m2','surface_total_in_m2','construction_index']].index
data.loc[falta_sup_cub_index,'surface_covered_in_m2'] = data.loc[falta_sup_cub_index,'construction_index']*data.loc[falta_sup_cub_index,'surface_total_in_m2']
# Missing total surface <- covered surface / index.
falta_sup_tot_index = data.loc[data['construction_index'].notnull()&(data['surface_covered_in_m2'].notnull())&(data['surface_total_in_m2'].isna()),['surface_covered_in_m2','surface_total_in_m2','construction_index']].index
data.loc[falta_sup_tot_index,'surface_total_in_m2'] = data.loc[falta_sup_tot_index,'surface_covered_in_m2']/data.loc[falta_sup_tot_index,'construction_index']
data.isna().sum()
```
# ANÁLISIS DE LAS DIFERENTES COTIZACIONES
En primer lugar evaluamos las diferentes monedas que existen en el dataset.
```
data['currency'].value_counts()
```
de la inspección de la columna "currency", podemos ver que salvo 3 registros, toda la información está cotizada en
Pesos Argentinos o Dólares. La decisión será chequear si esos 3 registros se tratan de errores de imputación y corregir o
convertir los mismos a dólares para manejar una sola divisa de cambio.
La elección se basa además en la descripción del dataset recibido ya que en "currency" sólo contempla la moneda ARS y DOL.
#### Chequeamos Tipos de Cambio presentes en el Dataset
#### Valores con currency ARS
```
((data.loc[data['currency']=='ARS','price_aprox_local_currency'])/(data.loc[data['currency']=='ARS','price_aprox_usd'])).round(decimals=4).value_counts()
((data.loc[data['currency']=='ARS','price'])/(data.loc[data['currency']=='ARS','price_aprox_usd'])).round(decimals=4).value_counts()
```
vemos que cuando el aviso es ingresado con currency==ARS están presentes en el dataset dos tipos de cambio :
uno para convertir ARS a USD = TC Comprador
Otro para convertir USD a ARS = TC Vendedor
Definimos las variables
```
# Exchange rates implied by ARS-quoted rows.
# NOTE(review): TC_comprador stores the full value_counts() Series, not a
# scalar, while TC_vendedor uses .mean() — confirm which was intended
# (later code hard-codes 17.6445 instead of using TC_comprador).
TC_comprador=((data.loc[data['currency']=='ARS','price_aprox_local_currency'])/(data.loc[data['currency']=='ARS','price_aprox_usd'])).round(decimals=4).value_counts()
TC_comprador
# Seller-side rate: price (ARS) / price_aprox_usd, averaged to a scalar.
TC_vendedor=((data.loc[data['currency']=='ARS','price'])/(data.loc[data['currency']=='ARS','price_aprox_usd'])).round(decimals=4).mean()
TC_vendedor
```
#### Valores con currency USD
```
((data.loc[data['currency']=='USD','price'])==(data.loc[data['currency']=='USD','price_aprox_usd'])).value_counts()
((data.loc[data['currency']=='USD','price_aprox_local_currency'])/(data.loc[data['currency']=='USD','price_aprox_usd'])).round(decimals=4).value_counts()
```
Para los valores ingresados en dólares 'price' coincide con 'price_aprox_usd'
El tipo de cambio entre 'price_aprox_local_currency' (moneda local, ARS) y el dolar es el ya visto como "TC_comprador"
Hacemos ahora un filtro del registro ingresado con moneda uruguaya (UYU), calculando el tipo de cambio original (columna "price") y revisamos además el correspondiente a la moneda americana
```
data['currency']=="UYU"
data_UYU= data.loc[data['currency']=="UYU", ["price", "price_aprox_local_currency","price_aprox_usd","place_with_parent_names"]]
data_UYU
data_UYU_TC=data_UYU["price"]/data_UYU['price_aprox_usd']
data_USD_TC=data_UYU["price_aprox_local_currency"]/data_UYU['price_aprox_usd']
print(data_UYU)
print(data_UYU_TC)
print(data_USD_TC)
```
En este caso vemos que se trata de una propiedad vendida en territorio argentino (provincia de Mendoza), que se ingresó con 3 cotizaciones diferentes. Vamos a depurar la fila, dejando los precios establecidos en ARS ya que Price no coincide con Price_aprox_usd y utiliza TC_comprador
```
data.loc[data['currency']=="UYU","price"]=data.loc[data['currency']=="UYU","price_aprox_usd"]*TC_vendedor
data['currency']=="UYU"
data_UYU= data.loc[data['currency']=="UYU", ["price", "price_aprox_local_currency","price_aprox_usd","place_with_parent_names"]]
data_UYU
data_UYU_TC=data_UYU["price"]/data_UYU['price_aprox_usd']
data_USD_TC=data_UYU["price_aprox_local_currency"]/data_UYU['price_aprox_usd']
print(data_UYU)
print(data_UYU_TC)
print(data_USD_TC)
print(data_UYU.index)
```
resta sólo cambiar la moneda
```
data.loc[data['currency']=='UYU', 'currency']="ARS"
```
chequeamos que el cambio se haya efectuado correctamente
```
data.iloc[data_UYU.index,:]
```
Hacemos el mismo procedimiento con las propiedades cotizadas con moneda peruana ("PEN")
```
#data['currency']=="PEN"
data_PEN=data.loc[data['currency']=="PEN", ["price", "price_aprox_local_currency","price_aprox_usd","country_name","place_with_parent_names"]]
data_PEN_TC=data_PEN["price"]/data_PEN['price_aprox_usd']
data_USD_TC=data_PEN["price_aprox_local_currency"]/data_PEN['price_aprox_usd']
print(data_PEN)
print(data_PEN_TC)
print(data_USD_TC)
```
Al igual que en el caso anterior, se trata de propiedades vendidas en CABA, que cuentan con dos tipos de cambio diferentes.
Se decide unificar a ARS
```
data.loc[data['currency']=="PEN","price"]=data.loc[data['currency']=="PEN","price_aprox_usd"]*TC_vendedor
data.iloc[data_PEN.index,:]
data.loc[data['currency']=='PEN', 'currency']="ARS"
```
Verificamos que se hayan aplicado correctamente los cambios
```
data.iloc[data_PEN.index,:][['price', "currency", "price_aprox_local_currency","price_aprox_usd","country_name","place_with_parent_names"]]
```
## Filtrado y limpieza de OUTLIERS en las Columnas de Precios
## Columna "price_per_m2"
```
# Summary statistics of price_per_m2, split by listing currency.
data.loc[(data['currency']=='ARS')&(data['price_per_m2'].notnull()),'price_per_m2'].describe()
price_m2_ARS=data.loc[(data['currency']=='ARS')&(data['price_per_m2'].notnull()),'price_per_m2']
print ('La mediana de precios por m2 en pesos es: {0:.2f}'.format(price_m2_ARS.median()),'(registros con currency ARS)')
print ('La media de precios por m2 en pesos es: {0:.2f}'.format(price_m2_ARS.mean()),'(registros con currency ARS)')
print ('El desvio de precios por m2 en pesos es : {0:.2f}'.format(price_m2_ARS.std()),'(registros con currency ARS)')
data.loc[(data['currency']=='USD')&(data['price_per_m2'].notnull()),'price_per_m2'].describe()
price_per_m2_USD=data.loc[(data['currency']=='USD')&(data['price_per_m2'].notnull()),'price_per_m2']
# BUG FIX: the three USD prints below reported the ARS series (price_m2_ARS)
# instead of price_per_m2_USD (copy-paste error).
print ('La mediana de precios por m2 en dólares es: {0:.2f}'.format(price_per_m2_USD.median()),'(registros con currency USD)')
print ('La media de precios por m2 en dólares es: {0:.2f}'.format(price_per_m2_USD.mean()),'(registros con currency USD)')
print ('El desvio de precios por m2 en dólares es: {0:.2f}'.format(price_per_m2_USD.std()),'(registros con currency USD)')
```
## Estado Inicial de Outliers
```
plt.figure(figsize=(8,8))
plt.grid(True)
ax = sns.boxplot(x="currency", y="price_per_m2", data=data, palette="Set1")
plt.title('price_per_m2 / Outliers Estado Inicial')
plt.xlabel('Currency')
plt.ylabel('Precio por M2');
plt.savefig('../Images/Price_m2_before.png')
```
## Cálculo de outliers con currency=ARS
```
# IQR-based outlier removal for price_per_m2 on ARS-quoted rows: values above
# the IQR upper limit or below a hard floor of 1800 ARS/m2 are nulled.
min_price_ARS_per_m2 = 1800
iqr_values_ARS=calculate_iqr_values(data.loc[(data['price_per_m2'].notnull())&(data['currency']=='ARS'),'price_per_m2'])
print(data.loc[(data['currency']=='ARS')&(data['price_per_m2'].notnull()),'price_per_m2'].apply(check_outliers,args=iqr_values_ARS).unique())
print(iqr_values_ARS)
data.loc[(data['currency']=='ARS')&((data['price_per_m2']>iqr_values_ARS[1])|(data['price_per_m2']<min_price_ARS_per_m2)),'price_per_m2']=np.nan
```
## Cálculo de outliers con currency=USD
```
# IQR-based outlier removal for price_per_m2 on USD-quoted rows: values above
# the IQR upper limit or below a hard floor of 100 USD/m2 are nulled.
min_price_USD_per_m2 = 100
iqr_values_USD=calculate_iqr_values(data.loc[(data['price_per_m2'].notnull())&(data['currency']=='USD'),'price_per_m2'])
print(data.loc[(data['currency']=='USD')&(data['price_per_m2'].notnull()),'price_per_m2'].apply(check_outliers,args=iqr_values_USD).unique())
print(iqr_values_USD)
# BUG FIX: the upper cut-off previously used iqr_values_ARS[1] (the ARS limit)
# on USD rows; it must use the USD IQR limit computed just above.
data.loc[(data['currency']=='USD')&((data['price_per_m2']>iqr_values_USD[1])|(data['price_per_m2']<min_price_USD_per_m2)),'price_per_m2']=np.nan
```
## Revisamos La Distribución Corregida de Valores
```
plt.figure(figsize=(8,8))
plt.grid(True)
ax = sns.boxplot(x="currency", y="price_per_m2", data=data, palette="Set1")
plt.title('price_per_m2 Distribución ')
plt.xlabel('Currency USD')
plt.ylabel('Precio por M2');
plt.savefig('../Images/Price_m2_after.png')
```
### Verificar Outliers para Columna "price_usd_per_m2"
```
plt.figure(figsize=(15,6))
plt.grid(True)
ax=sns.boxplot(x='price_usd_per_m2', data=data, orient='h', palette="Set1")
plt.title('Distribución de Precios por m2 en USD')
plt.xlabel('Valores')
plt.ylabel('Precio por Metro2')
plt.show()
plt.savefig("../Images/Price_USD_m2-before.png")
calculate_iqr_values(data.loc[data['price_usd_per_m2'].notnull(), "price_usd_per_m2"])
min_price2_USD_per_m2 = 150
iqr_values_price_USD=calculate_iqr_values(data.loc[data['price_usd_per_m2'].notnull(), "price_usd_per_m2"])
data.loc[(data['price_usd_per_m2']>iqr_values_price_USD[1])|(data['price_usd_per_m2']<min_price2_USD_per_m2),'price_usd_per_m2']=np.nan
plt.figure(figsize=(15,6))
plt.grid(True)
ax=sns.boxplot(x='price_usd_per_m2', data=data, orient='h', palette="Set1")
plt.title('Distribución de Precios por m2 en USD')
plt.xlabel('Valores')
plt.ylabel('Precio por Metro2')
plt.show()
plt.savefig("../Images/Price_USD_m2-after.png")
```
## Usamos regex para buscar los precios faltantes en las columnas "description" y "title"
```
def price_search(text):
    """Extract a USD price from a listing title/description.

    Matches a currency/price keyword followed by a number. Thousand
    separators ("," and ".") are stripped, so values are assumed to be
    integer amounts. Returns the price when it exceeds 35000 (filters out
    surfaces, expenses, etc.); with several candidates the maximum is used.
    Returns np.nan otherwise.
    """
    # FIX: "DOL\." was previously written "DOL." — the unescaped dot let
    # "DOL" followed by ANY character act as a currency marker.
    # NOTE(review): the "precio total" alternative is unreachable because
    # "precio" appears earlier in the alternation — confirm intent.
    pattern = r'(?:USD|U\$D|DOLL|DOL\.|U\$S|dolares|dólares|precio|precio total)\s?(\d+,?\.?\d*,?\.?\d*)'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    price_lst = [float(item.replace(',', '').replace('.', '')) for item in regex.findall(text)]
    if not price_lst:
        return np.nan
    # With several candidates, trust the largest one if it clears the floor.
    best = max(price_lst)
    return best if best > 35000 else np.nan
data.loc[data['price_aprox_usd'].isna(),'description'].apply(price_search).notnull().sum()
data.loc[data['price_aprox_usd'].isna(),'title'].apply(price_search).notnull().sum()
def price_search2(text):
    """Extract a USD price written with the currency AFTER the number
    (e.g. "120000 USD").

    Same filtering rules as price_search: thousand separators are stripped,
    the value must exceed 35000, and with several candidates the maximum is
    used. Returns np.nan otherwise.
    """
    # FIX: "DOL\." was previously "DOL." (unescaped dot matched any char).
    pattern = r'(\d+,?\.?\d*,?\.?\d*)\s?(?:USD|U\$D|DOLL|DOL\.|U\$S|dolares|dólares)+'
    regex = re.compile(pattern, flags=re.IGNORECASE)
    price_lst = [float(item.replace(',', '').replace('.', '')) for item in regex.findall(text)]
    if not price_lst:
        return np.nan
    best = max(price_lst)
    return best if best > 35000 else np.nan
```
Aplicamos las dos funciones a las dos columnas
```
# Null counts before recovery (price-related columns live in 10:18).
data.iloc[:,10:18].isnull().sum()
# Fill missing USD prices from 'title' first, then 'description':
# keyword-first pattern (price_search), then number-first (price_search2).
data["price_aprox_usd"].fillna(value=data.loc[data['price_aprox_usd'].isna(),'title'].apply(price_search), inplace=True)
data["price_aprox_usd"].fillna(value=data.loc[data['price_aprox_usd'].isna(),'title'].apply(price_search2), inplace=True)
data["price_aprox_usd"].fillna(value=data.loc[data['price_aprox_usd'].isna(),'description'].apply(price_search), inplace=True)
data["price_aprox_usd"].fillna(value=data.loc[data['price_aprox_usd'].isna(),'description'].apply(price_search2), inplace=True)
data.iloc[:,10:18].isnull().sum()
#REVISAR SI ESTA FILA DESAPARECE ANTES
(data.loc[(data['price_aprox_local_currency'].notnull())&(data['currency'].isna())&(data['price'].notnull()),['price','price_aprox_usd','price_aprox_local_currency','currency']])
```
## Completando los valores expresados en pesos
```
def price_search_pesos(text):
    """Extract a price in pesos from free text: a "$" sign followed by a
    number (thousand separators "," and "." are stripped).

    A single candidate must exceed 35000; with several candidates the
    maximum is returned when it exceeds 20000. Returns np.nan otherwise.
    """
    peso_regex = re.compile(r'(?:\$)\s?(\d+,?\.?\d*,?\.?\d*)', flags=re.IGNORECASE)
    candidates = [float(raw.replace(',', '').replace('.', '')) for raw in peso_regex.findall(text)]
    if not candidates:
        return np.nan
    if len(candidates) == 1:
        only = candidates[0]
        return only if only > 35000 else np.nan
    top = max(candidates)
    return top if top > 20000 else np.nan
data.iloc[:,10:18].isnull().sum()
data["price_aprox_local_currency"].fillna(value=data.loc[data['price_aprox_local_currency'].isna(),'title'].apply(price_search_pesos), inplace=True)
data["price_aprox_local_currency"].fillna(value=data.loc[data['price_aprox_local_currency'].isna(),'description'].apply(price_search_pesos), inplace=True)
data.iloc[:,10:18].isnull().sum()
```
# CÁLCULO DE VALORES FALTANTES CORRESPONDIENTES A VARIABLES ALEATORIAS LINEALMENTE DEPENDIENTES
En el paso anterior se vio, y se hizo una reducción de los tipos de cambio presentes en el dataset. En esa instancia se decidió
trabajar sólo con moneda local (pesos argentinos) y dólar como única divisa.
A continuación, vamos a analizar las variables aleatorias presentes en el dataset; en primer lugar, el peso que se le da a los metros cubiertos y descubiertos en el precio de la propiedad.
```
data.iloc[:,10:18].isnull().sum()
```
#### Variables Aleatorias Linealmente Dependientes
Las siguientes columnas, están formadas por variables que son LD unas de otras. Por lo cuál, vamos a recuperar valores
realizando operaciones algebraicas entre ellas
Los datos recuperados en 'price' se calcularán directamente en dólares
```
data.iloc[1:5,:][['price', 'currency', 'price_aprox_local_currency','price_aprox_usd','surface_total_in_m2', 'price_usd_per_m2', "price_per_m2"]]
```
Partimos con estos datos faltantes
```
data.iloc[:,10:18].isnull().sum()
```
#### Operaciones con Tipo de Cambio
Sabemos que price=price_aprox_usd cuando "currency"==DOL
y que con ese ingreso la relación entre "price" y "price_aprox_local_currency" es el "TC_comprador" ya calculado
```
# price == price_aprox_usd when the listing currency is USD, so the two
# columns can backfill each other.
price_new=data.loc[data["price_aprox_usd"].notnull(), "price_aprox_usd"]
data["price"].fillna(value=price_new, inplace=True)
data["price_aprox_usd"].fillna(value=data.loc[data["price"].notnull(), "price"], inplace=True)
# Recompute the buyer exchange rate implied by ARS rows.
TC_comprador=((data.loc[data['currency']=='ARS','price_aprox_local_currency'])/(data.loc[data['currency']=='ARS','price_aprox_usd'])).round(decimals=4)
TC_comprador.value_counts()
# NOTE(review): 17.6445 hard-codes the dominant rate seen above — consider
# deriving it from TC_comprador instead of a magic literal.
price_aprox_local_currency_new =data["price"]*17.6445
data["price_aprox_local_currency"].fillna(value=price_aprox_local_currency_new, inplace=True)
```
#### Precios por M2
```
price_usd_per_m2_new=data['price_aprox_usd']/data['surface_total_in_m2']
price_per_m2_new=data['price']/data['surface_covered_in_m2']
data["price_usd_per_m2"].fillna(value=price_usd_per_m2_new, inplace=True)
data["price_per_m2"].fillna(value=price_per_m2_new, inplace=True)
```
#### Cálculo de superficies
Calculamos la superficie a partir de los precios en ambas monedas y el correspondiente por m2
```
# Recover missing surfaces from price / price-per-m2 (inverse relationship).
surface_total_in_m2_new= data["price_aprox_usd"]/data['price_usd_per_m2']
surface_covered_in_m2_new= data["price"]/data['price_per_m2']
data["surface_total_in_m2"].fillna(value=surface_total_in_m2_new, inplace=True)
data["surface_covered_in_m2"].fillna(value=surface_covered_in_m2_new, inplace=True)
# Outlier re-filtering of these recovered surfaces was left disabled:
# for tipo,limites_tot,limites_cub in zip(property_type,sup_tot_limits,sup_cub_limits):
# for i in range(len(limites_tot)):
# data.loc[(data['property_type']==str(tipo))&(data['surface_total_in_m2']>limites_tot[i][1]),'surface_total_in_m2']=np.nan
# data.loc[(data['property_type']==str(tipo))&(data['surface_covered_in_m2']>limites_cub[i][1]),'surface_covered_in_m2']=np.nan
# data.loc[(data['property_type']==str(tipo))&(data['surface_total_in_m2']<limites_tot[i][0]),'surface_total_in_m2']=np.nan
# data.loc[(data['property_type']==str(tipo))&(data['surface_covered_in_m2']<limites_cub[i][0]),'surface_covered_in_m2']=np.nan
```
#### Cálculo de los precios (Columnas "price" y "price_aprox_usd" a partir de los correspondientes precios por m2
```
# Recover missing prices from the per-m2 columns.
price_aprox_usd_new=data['price_usd_per_m2']*data["surface_total_in_m2"]
data["price_aprox_usd"].fillna(value=price_aprox_usd_new, inplace=True)
price_new=data['price_per_m2']*data["surface_covered_in_m2"]
# BUG FIX: Series.fillna(value={'price': ..., 'currency': 'USD'}) treats the
# dict keys as INDEX labels, so the original line filled nothing. Per the
# section note ("recovered prices are computed directly in dollars"), fill
# 'price' with the computed values and tag those rows as USD.
recovered = data["price"].isna() & price_new.notnull()
data.loc[recovered, "price"] = price_new
data.loc[recovered, "currency"] = "USD"
```
#### Cálculo del precio por m2 en pesos a partir del precio y superficie (operación inversa)
```
price_usd_per_m2_new= data["price_aprox_usd"]/data['surface_total_in_m2']
price_per_m2_new= data["price"]/data['surface_covered_in_m2']
data["price_usd_per_m2"].fillna(value=price_usd_per_m2_new, inplace=True)
data["price_per_m2"].fillna(value=price_per_m2_new, inplace=True)
```
# Columna 'expenses'
La columna de expensas tenía más de 100.000 datos faltantes. Se intentó hacer una búsqueda de esos datos, pero sin mucho éxito (no se pudieron recuperar los suficientes). Además, tomando en cuenta que para el modelo de regresión no tiene valor, se decidió dropear toda esta columna.
## Limpieza Final
Eliminar columnas que no servirán para el siguiente desafío (regresión).
Eliminar datos nulos.
Guardar en un csv los datos limpios.
```
# Final cleanup: drop columns not needed for the regression task, drop rows
# with any remaining nulls, and persist the clean dataset.
data.columns
data2 = data.copy()
data3 = data2.drop(['geonames_id','lat-lon','lat','lon','expenses','image_thumbnail','construction_index','title','description','properati_url'],axis=1).dropna().copy()
# NOTE(review): reset_index(inplace=True) keeps the old index as a column in
# the saved CSV — reset_index(drop=True) may be what was intended.
data3.reset_index(inplace=True)
data3.to_csv('../Data/properati_clean.csv')
data3.head()
data3.shape
data3.operation.value_counts()
data3.currency.value_counts()
data3.country_name.value_counts()
```
| github_jupyter |
# MAT281 - Laboratorio N°11
<a id='p1'></a>
## I.- Problema 01
Lista de actos delictivos registrados por el Service de police de la Ville de Montréal (SPVM).
<img src="http://henriquecapriles.com/wp-content/uploads/2017/02/femina_detenida-1080x675.jpg" width="480" height="360" align="center"/>
El conjunto de datos en estudio `interventionscitoyendo.csv` corresponde a todos los delitos entre 2015 y agosto de 2020 en Montreal. Cada delito está asociado a grandes categorías, y hay información sobre la ubicación, el momento del día, etc.
> **Nota**: Para más información seguir el siguiente el [link](https://donnees.montreal.ca/ville-de-montreal/actes-criminels).
```
# librerias
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
from matplotlib.colors import rgb_to_hsv
from statsmodels.tsa.statespace.sarimax import SARIMAX
from metrics_regression import *
# graficos incrustados
plt.style.use('fivethirtyeight')
%matplotlib inline
# parametros esteticos de seaborn
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (12, 4)})
# read data
# Crime categories retained for the analysis.
validate_categorie = [
'Introduction', 'Méfait','Vol dans / sur véhicule à moteur', 'Vol de véhicule à moteur',
]
# SPVM incident file is latin-1 encoded; normalise column names to lowercase.
df = pd.read_csv(os.path.join("data","interventionscitoyendo.csv"), sep=",", encoding='latin-1')
df.columns = df.columns.str.lower()
# 'date_str' keeps the YYYY-MM prefix for monthly labels.
df['date_str'] = df['date'].apply(lambda x: x[:7])
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
# Keep only the categories under study (callable filter on the frame).
df = df.loc[lambda x: x['categorie'].isin(validate_categorie)]
df.head()
```
Como tenemos muchos datos por categoría a nivel de día, agruparemos a nivel de **semanas** y crearemos un nuevo conjuntos de datos llamados `df_week`.
```
# Aggregate incident counts ('pdq') to weekly granularity per category.
# FIX: Series.dt.week was deprecated and removed in pandas >= 2.0; the
# equivalent ISO week number comes from isocalendar(). astype(int) keeps the
# YYYYWW arithmetic below identical (isocalendar returns UInt32).
df['weekofyear'] = df['date'].dt.isocalendar().week.astype(int)
df['year'] = df['date'].dt.year
df_week = df.groupby(['categorie','weekofyear','year'])['pdq'].sum().reset_index()
# Rebuild a representative date for each (year, week): YYYYWW + weekday 0.
# NOTE(review): %W is Monday-based week-of-year, not ISO week — the mapping
# back to dates can be off by one week at year boundaries; confirm.
dates = df_week.year*100+df_week.weekofyear
df_week['date'] = pd.to_datetime(dates.astype(str) + '0', format='%Y%W%w')
df_week.head()
```
El objetivo de este laboratorio es poder realizar un análisis completo del conjunto de datos en estudio, para eso debe responder las siguientes preguntas:
1. Para cada categoría grafique la serie temporal correspondiente (columna `categorie`).
2. Elegir una categoría dentro de `validate_categorie`, luego cree el conjunto de datos llamado `df_categorie`.
3. Realice un análisis exploratorio de la serie temporal escogida.
4. Aplicar el modelo de pronóstico $SARIMA(p,d,q)x(P,D,Q,S)$, probando varias configuraciones de los hiperparámetros. Encuentre la mejor configuración. Concluya.
5. Para el mejor modelo encontrado, verificar si el residuo corresponde a un ruido blanco.
```
# One time series plot per category (DataFrame indexing accepts a callable mask).
for cat in validate_categorie:
    mask= lambda df: (df["categorie"] == cat)
    plt.figure(figsize=(10, 6))
    sns.lineplot(
        x='date',
        y='pdq',
        hue='categorie',
        data=df_week[mask],
        ci = None
    )
    plt.show()
# Pick one category at random (seeded for reproducibility) and build the
# working dataset for the rest of the lab.
random.seed(666)
categorie = validate_categorie[random.randint(0,3)]
mask = lambda df: df["categorie"] == categorie
df_categorie = df_week[mask]
df_categorie.head()
#Exploracion de datos
# Resumen de la informacion
def resumen_por_columna(df, cols):
    """Return a one-row DataFrame summarising column `cols` of `df`:
    its name, the number of distinct values (NaN counts as a value, matching
    Series.unique), and the number of missing entries.
    """
    serie = df[cols]
    resumen = {
        'columna': [cols],
        'unicos': [serie.nunique(dropna=False)],
        'vacios': [int(serie.isna().sum())],
    }
    return pd.DataFrame(resumen)
#exploracion de datos nulos
# Stack the one-row summaries of every column into a single frame.
frames = []
for col in df_categorie.columns:
    aux_df = resumen_por_columna(df_categorie,col)
    frames.append(aux_df)
df_info = pd.concat(frames).reset_index(drop=True)
df_info
#revisar la integridad de la columna year
df_categorie["year"].unique()
#eliminar datos donde el año no coincide con la fecha
mask = lambda df: df["year"] == df["date"].dt.year
df_categorie = df_categorie[mask]
df_categorie
#modelo de pronostico
# Resample weekly counts to monthly means; everything before target_date is
# the training window, the rest is held out for testing.
df_y = df_categorie[["pdq","date"]].set_index('date').resample('M').mean()
target_date = "2021-01-01"
mask_ds = df_y.index < target_date
y_train = df_y[mask_ds]
y_test = df_y[~mask_ds]
#plotting the data
y_train["pdq"].plot()
y_test["pdq"].plot()
plt.show()
#definir clase para automatizar el proceso
class SarimaModels:
def __init__(self,params):
self.params = params
@property
def name_model(self):
return f"SARIMA_{self.params[0]}X{self.params[1]}".replace(' ','')
@staticmethod
def test_train_model(y,date):
mask_ds = y.index < date
y_train = y[mask_ds]
y_test = y[~mask_ds]
return y_train, y_test
def fit_model(self,y,date):
y_train, y_test = self.test_train_model(y,date )
model = SARIMAX(y_train,
order=self.params[0],
seasonal_order=self.params[1],
enforce_stationarity=False,
enforce_invertibility=False)
model_fit = model.fit(disp=0)
return model_fit
def df_testig(self,y,date):
y_train, y_test = self.test_train_model(y,date )
model = SARIMAX(y_train,
order=self.params[0],
seasonal_order=self.params[1],
enforce_stationarity=False,
enforce_invertibility=False)
model_fit = model.fit(disp=0)
start_index = y_test.index.min()
end_index = y_test.index.max()
preds = model_fit.get_prediction(start=start_index,end=end_index, dynamic=False)
df_temp = pd.DataFrame(
{
'y':y_test['pdq'],
'yhat': preds.predicted_mean
}
)
return df_temp
def metrics(self,y,date):
df_temp = self.df_testig(y,date)
df_metrics = summary_metrics(df_temp)
df_metrics['model'] = self.name_model
return df_metrics
# definir parametros
# Hyper-parameter grid for SARIMA(p,d,q)x(P,D,Q,S): p,d,q,P,D,Q in {0,1}, S=12.
import itertools
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
params = list(itertools.product(pdq,seasonal_pdq))
target_date = '2021-01-01'
# iterar para los distintos escenarios
# Evaluate every configuration; skip the ones that fail to fit (best effort).
frames = []
for param in params:
    try:
        sarima_model = SarimaModels(param)
        df_metrics = sarima_model.metrics(df_y,target_date)
        frames.append(df_metrics)
    except Exception:
        # FIX: was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit. Failed fits are still skipped on purpose.
        pass
# juntar resultados de las métricas y comparar
df_metrics_result = pd.concat(frames)
df_metrics_result.sort_values(['mae','mape'])
# ajustar mejor modelo
param = [(0,1,0),(1,0,0,12)]
sarima_model = SarimaModels(param)
model_fit = sarima_model.fit_model(df_y,target_date)
best_model = sarima_model.df_testig(df_y,target_date)
best_model.head()
# graficar mejor modelo
# Observed series vs forecast over the held-out period.
preds = best_model['yhat']
ax = df_y['2015':].plot(label='observed')
preds.plot(ax=ax, label='Forecast', alpha=.7, figsize=(14, 7))
ax.set_xlabel('Date')
ax.set_ylabel('pdq')
plt.legend()
plt.show()
# resultados del error
# Residual diagnostics: a good model leaves white-noise residuals.
model_fit.plot_diagnostics(figsize=(16, 8))
plt.show()
```
Segun los gra
| github_jupyter |
**About this challenge**
To assess the impact of climate change on Earth's flora and fauna, it is vital to quantify how human activities such as logging, mining, and agriculture are impacting our protected natural areas. Researchers in Mexico have created the VIGIA project, which aims to build a system for autonomous surveillance of protected areas. A first step in such an effort is the ability to recognize the vegetation inside the protected areas. In this competition, you are tasked with creation of an algorithm that can identify a specific type of cactus in aerial imagery.
In this kernel we will be trying to solve this challenge using CNN through **fast.ai library**

**Loading necessary libraries**
```
from fastai.vision import *
from fastai import *
import os
import pandas as pd
import numpy as np
print(os.listdir("../input/"))
# Paths and metadata for the Aerial Cactus Identification dataset.
train_dir="../input/train/train"
test_dir="../input/test/test"
train = pd.read_csv('../input/train.csv')
# sample_submission.csv doubles as the test-id listing.
test = pd.read_csv("../input/sample_submission.csv")
data_folder = Path("../input")
```
**Analysing the given data**
```
train.head(5)
train.describe()
```
**Getting the Data. **
[reference](https://docs.fast.ai/vision.data.html)
```
test_img = ImageList.from_df(test, path=data_folder/'test', folder='test')
# Applying Data augmentation
trfm = get_transforms(do_flip=True, flip_vert=True, max_rotate=10.0, max_zoom=1.1, max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75)
# fastai data-block pipeline: 1% random validation split, labels from the
# dataframe, test set attached, 128px images, batch size 64 on GPU, and
# ImageNet normalisation (required by the pretrained backbone).
train_img = (ImageList.from_df(train, path=data_folder/'train', folder='train')
        .split_by_rand_pct(0.01)
        .label_from_df()
        .add_test(test_img)
        .transform(trfm, size=128)
        .databunch(path='.', bs=64, device= torch.device('cuda:0'))
        .normalize(imagenet_stats)
        )
```
**Training the data using appropriate model. We have used [densenet](https://pytorch.org/docs/stable/torchvision/models.html) here**
```
learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy])
```
**Finding the suitable learning rate**
```
learn.lr_find()
```
**Plotting the Learning Rate**
```
# Plot the loss-vs-learning-rate curve recorded by lr_find().
learn.recorder.plot()
```
**Now training the data based on suitable learning rate**
```
# Learning rate chosen from the lr_find plot above.
lr = 1e-02
# Train the head for 3 epochs with the one-cycle policy.
learn.fit_one_cycle(3, slice(lr))
# Class probabilities for the test set, in test-dataframe order.
preds,_ = learn.get_preds(ds_type=DatasetType.Test)
# NOTE(review): column 0 is the probability of the first entry of
# learn.data.classes. For 0/1 labels fastai sorts the classes, so index 1
# usually corresponds to label 1 ("has cactus") — confirm which column is
# intended before submitting.
test.has_cactus = preds.numpy()[:, 0]
test.to_csv('submission.csv', index=False)
```
**References**
* https://docs.fast.ai/
* https://www.kaggle.com/kenseitrg/simple-fastai-exercise
* https://www.kaggle.com/shahules/getting-started-with-cnn-and-vgg16
| github_jupyter |
ERROR: type should be string, got "https://www.ax.dev/tutorials/\n\n__Tune a CNN on MNIST__\n\n1. [Import](#Import)\n1. [Load MNIST data](#Load-MNIST-data)\n1. [Define a function to optimize](#Define-a-function-to-optimize)\n1. [Run the optimization loop](#Run-the-optimization-loop)\n1. [Plot response surface](#Plot-response-surface)\n1. [Plot best objective as function of the iteration](#Plot-best-objective-as-function-of-the-iteration)\n1. [Train CNN with best hyperparameters and evaluate on test set](#Train-CNN-with-best-hyperparameters-and-evaluate-on-test-set)\n\n# Import\n\n<a id = 'Import'></a>\n\n```\nimport torch\nimport numpy as np\n\nfrom ax.plot.contour import plot_contour\nfrom ax.plot.trace import optimization_trace_single_method\nfrom ax.service.managed_loop import optimize\nfrom ax.utils.notebook.plotting import render, init_notebook_plotting\nfrom ax.utils.tutorials.cnn_utils import load_mnist, train, evaluate\n\ninit_notebook_plotting()\n```\n\n# Load MNIST data\n\n<a id = 'Load-MNIST-data'></a>\n\n```\n#\ntrain_loader, valid_loader, test_loader = load_mnist()\n```\n\n# Define a function to optimize\n\n<a id = 'Define-a-function-to-optimize'></a>\n\n```\n#\ndtype = torch.float\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train_evaluate(parameterization):\n net = train(\n train_loader=train_loader,\n parameters=parameterization,\n dtype=dtype,\n device=device,\n )\n return evaluate(net=net, data_loader=valid_loader, dtype=dtype, device=device)\n```\n\n# Run the optimization loop\n\n<a id = 'Run-the-optimization-loop'></a>\n\n```\n#\nbest_parameters, values, experiment, model = optimize(\n parameters=[\n {\"name\": \"lr\", \"type\": \"range\", \"bounds\": [1e-6, 0.4], \"log_scale\": True},\n {\"name\": \"momentum\", \"type\": \"range\", \"bounds\": [0.0, 1.0]},\n ],\n evaluation_function=train_evaluate,\n objective_name=\"accuracy\",\n)\n#\nbest_parameters\n#\nmeans, covariances = 
values\nprint(means)\nprint(covariances)\n```\n\n# Plot response surface\n\n<a id = 'Plot-response-surface'></a>\n\n```\n#\nrender(\n plot_contour(model=model, param_x=\"lr\", param_y=\"momentum\", metric_name=\"accuracy\")\n)\n```\n\n# Plot best objective as function of the iteration\n\n<a id = 'Plot-best-objective-as-function-of-the-iteration'></a>\n\n```\n#\nbest_objectives = np.array(\n [[trial.objective_mean * 100 for trial in experiment.trials.values()]]\n)\nbest_objective_plot = optimization_trace_single_method(\n y=np.maximum.accumulate(best_objectives, axis=1),\n title=\"Model performance vs. # of iters\",\n ylabel=\"Classification accuracy %\",\n)\nrender(best_objective_plot)\n```\n\n# Train CNN with best hyperparameters and evaluate on test set\n\n<a id = 'Train-CNN-with-best-hyperparameters-and-evaluate-on-test-set'></a>\n\n```\n#\ndata = experiment.fetch_data()\ndf = data.df\nbest_arm_name = df.arm_name[df[\"mean\"] == df[\"mean\"].max()].values[0]\nbest_arm = experiment.arms_by_name[best_arm_name]\nbest_arm\n#\nnet = train(\n train_loader=train_loader,\n parameters=best_arm.parameters,\n dtype=dtype,\n device=device,\n)\ntest_accuracy = evaluate(net=net, data_loader=test_loader, dtype=dtype, device=device)\ntest_accuracy\n#\nprint(\"Classification\")\n```\n\n" | github_jupyter |
#### SageMaker Pipelines Tuning Step
This notebook illustrates how a Hyperparameter Tuning Job can be run as a step in a SageMaker Pipeline.
The steps in this pipeline include -
* Preprocessing the abalone dataset
* Running a Hyperparameter Tuning job
* Creating the 2 best models
* Evaluating the performance of the top performing model of the HPO step
* Registering the top model in the model registry using a conditional step based on evaluation metrics
```
import sys
!{sys.executable} -m pip install "sagemaker>=2.48.0"
import os
import boto3
import sagemaker
from sagemaker.estimator import Estimator
from sagemaker.inputs import TrainingInput
from sagemaker.processing import (
ProcessingInput,
ProcessingOutput,
Processor,
ScriptProcessor,
)
from sagemaker import Model
from sagemaker.xgboost import XGBoostPredictor
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.model_metrics import (
MetricsSource,
ModelMetrics,
)
from sagemaker.workflow.parameters import (
ParameterInteger,
ParameterString,
)
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.steps import (
ProcessingStep,
CacheConfig,
TuningStep,
)
from sagemaker.workflow.step_collections import RegisterModel, CreateModelStep
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.functions import Join, JsonGet
from sagemaker.workflow.execution_variables import ExecutionVariables
from sagemaker.tuner import (
ContinuousParameter,
HyperparameterTuner,
WarmStartConfig,
WarmStartTypes,
)
# Create the SageMaker Session
region = sagemaker.Session().boto_region_name
sm_client = boto3.client("sagemaker")
boto_session = boto3.Session(region_name=region)
sagemaker_session = sagemaker.session.Session(boto_session=boto_session, sagemaker_client=sm_client)
# Define variables and parameters needed for the Pipeline steps
role = sagemaker.get_execution_role()
default_bucket = sagemaker_session.default_bucket()
base_job_prefix = "tuning-step-example"
model_package_group_name = "tuning-job-model-packages"
processing_instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)
processing_instance_type = ParameterString(
name="ProcessingInstanceType", default_value="ml.m5.xlarge"
)
training_instance_type = ParameterString(name="TrainingInstanceType", default_value="ml.m5.xlarge")
model_approval_status = ParameterString(
name="ModelApprovalStatus", default_value="PendingManualApproval"
)
input_data = ParameterString(
name="InputDataUrl",
default_value=f"s3://sagemaker-servicecatalog-seedcode-{region}/dataset/abalone-dataset.csv",
)
model_approval_status = ParameterString(
name="ModelApprovalStatus", default_value="PendingManualApproval"
)
# Cache Pipeline steps to reduce execution time on subsequent executions
cache_config = CacheConfig(enable_caching=True, expire_after="30d")
```
#### Data Preparation
An SKLearn processor is used to prepare the dataset for the Hyperparameter Tuning job. Using the script `preprocess.py`, the dataset is featurized and split into train, test, and validation datasets.
The output of this step is used as the input to the TuningStep
```
%%writefile preprocess.py
"""Feature engineers the abalone dataset."""
import argparse
import logging
import os
import pathlib
import requests
import tempfile
import boto3
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
"sex",
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight",
]
label_column = "rings"
feature_columns_dtype = {
"sex": str,
"length": np.float64,
"diameter": np.float64,
"height": np.float64,
"whole_weight": np.float64,
"shucked_weight": np.float64,
"viscera_weight": np.float64,
"shell_weight": np.float64,
}
label_column_dtype = {"rings": np.float64}
def merge_two_dicts(x, y):
    """Return a new dict with x's entries, overridden by y's where keys clash."""
    return {**x, **y}
if __name__ == "__main__":
logger.debug("Starting preprocessing.")
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str, required=True)
args = parser.parse_args()
base_dir = "/opt/ml/processing"
pathlib.Path(f"{base_dir}/data").mkdir(parents=True, exist_ok=True)
input_data = args.input_data
bucket = input_data.split("/")[2]
key = "/".join(input_data.split("/")[3:])
logger.info("Downloading data from bucket: %s, key: %s", bucket, key)
fn = f"{base_dir}/data/abalone-dataset.csv"
s3 = boto3.resource("s3")
s3.Bucket(bucket).download_file(key, fn)
logger.debug("Reading downloaded data.")
df = pd.read_csv(
fn,
header=None,
names=feature_columns_names + [label_column],
dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype),
)
os.unlink(fn)
logger.debug("Defining transformers.")
numeric_features = list(feature_columns_names)
numeric_features.remove("sex")
numeric_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="median")),
("scaler", StandardScaler()),
]
)
categorical_features = ["sex"]
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocess = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
logger.info("Applying transforms.")
y = df.pop("rings")
X_pre = preprocess.fit_transform(df)
y_pre = y.to_numpy().reshape(len(y), 1)
X = np.concatenate((y_pre, X_pre), axis=1)
logger.info("Splitting %d rows of data into train, validation, test datasets.", len(X))
np.random.shuffle(X)
train, validation, test = np.split(X, [int(0.7 * len(X)), int(0.85 * len(X))])
logger.info("Writing out datasets to %s.", base_dir)
pd.DataFrame(train).to_csv(f"{base_dir}/train/train.csv", header=False, index=False)
pd.DataFrame(validation).to_csv(
f"{base_dir}/validation/validation.csv", header=False, index=False
)
pd.DataFrame(test).to_csv(f"{base_dir}/test/test.csv", header=False, index=False)
# Process the training data step using a python script.
# Split the training data set into train, test, and validation datasets
# When defining the ProcessingOutput destination as a dynamic value using the
# Pipeline Execution ID, caching will not be in effect as each time the step runs,
# the step definition changes resulting in new execution. If caching is required,
# the ProcessingOutput definition should be static
# SKLearn container that runs preprocess.py as a Processing job.
sklearn_processor = SKLearnProcessor(
framework_version="0.23-1",
instance_type=processing_instance_type,
instance_count=processing_instance_count,
base_job_name=f"{base_job_prefix}/sklearn-abalone-preprocess",
sagemaker_session=sagemaker_session,
role=role,
)
# Each output destination embeds the pipeline execution id, so every run writes
# to a fresh S3 prefix (this also disables step caching — see note above).
step_process = ProcessingStep(
name="PreprocessAbaloneDataForHPO",
processor=sklearn_processor,
outputs=[
ProcessingOutput(
output_name="train",
source="/opt/ml/processing/train",
destination=Join(
on="/",
values=[
"s3:/",
default_bucket,
base_job_prefix,
ExecutionVariables.PIPELINE_EXECUTION_ID,
"PreprocessAbaloneDataForHPO",
],
),
),
ProcessingOutput(
output_name="validation",
source="/opt/ml/processing/validation",
destination=Join(
on="/",
values=[
"s3:/",
default_bucket,
base_job_prefix,
ExecutionVariables.PIPELINE_EXECUTION_ID,
"PreprocessAbaloneDataForHPO",
],
),
),
ProcessingOutput(
output_name="test",
source="/opt/ml/processing/test",
destination=Join(
on="/",
values=[
"s3:/",
default_bucket,
base_job_prefix,
ExecutionVariables.PIPELINE_EXECUTION_ID,
"PreprocessAbaloneDataForHPO",
],
),
),
],
code="preprocess.py",
job_arguments=["--input-data", input_data],
)
```
#### Hyperparameter Tuning
Amazon SageMaker automatic model tuning, also known as hyperparameter tuning, finds the best version of a model by running many training jobs on your dataset using the algorithm and ranges of hyperparameters that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by a metric that you choose.
[Valid metrics](https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst#learning-task-parameters) for XGBoost Tuning Job
You can learn more about [Hyperparameter Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html) in the SageMaker docs.
```
# Define the output path for the model artifacts from the Hyperparameter Tuning Job
model_path = f"s3://{default_bucket}/{base_job_prefix}/AbaloneTrain"
# Built-in XGBoost container image for this region/version.
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
base_job_name=f"{base_job_prefix}/abalone-train",
sagemaker_session=sagemaker_session,
role=role,
)
# Fixed (non-tuned) hyperparameters shared by every training job in the sweep.
xgb_train.set_hyperparameters(
eval_metric="rmse",
objective="reg:squarederror",  # Define the object metric for the training job
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
# NOTE(review): `silent` is deprecated in XGBoost >= 1.0 in favour of
# `verbosity`; confirm the 1.0-1 container still accepts it.
silent=0,
)
# Tune only the L1 (alpha) and L2 (lambda) regularization terms on a log scale.
objective_metric_name = "validation:rmse"
hyperparameter_ranges = {
"alpha": ContinuousParameter(0.01, 10, scaling_type="Logarithmic"),
"lambda": ContinuousParameter(0.01, 10, scaling_type="Logarithmic"),
}
# Random search, 3 jobs all in parallel, minimizing validation RMSE.
tuner_log = HyperparameterTuner(
xgb_train,
objective_metric_name,
hyperparameter_ranges,
max_jobs=3,
max_parallel_jobs=3,
strategy="Random",
objective_type="Minimize",
)
# Inputs come from the preprocessing step's named outputs (train/validation).
step_tuning = TuningStep(
name="HPTuning",
tuner=tuner_log,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"validation"
].S3Output.S3Uri,
content_type="text/csv",
),
},
cache_config=cache_config,
)
```
#### Warm start for Hyperparameter Tuning Job
Use warm start to start a hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job. Hyperparameter tuning uses either Bayesian or random search to choose combinations of hyperparameter values from ranges that you specify.
Find more information on [Warm Starts](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-warm-start.html) in the SageMaker docs.
In a training pipeline, the parent tuning job name can be provided as a pipeline parameter if there is an already complete Hyperparameter tuning job that should be used as the basis for the warm start.
This step is left out of the pipeline steps in this notebook. It can be added into the steps while defining the pipeline and the appropriate parent tuning job should be specified.
```
# This is an example to illustrate how the name of the tuning job from the previous step can be used as the parent tuning job. In practice,
# it is unlikely to have the parent job run before the warm start job on each run. Typically the first tuning job would run and the pipeline
# would be altered to use tuning jobs with a warm start using the first job as the parent job.
# The parent tuning job whose trial history seeds the warm start.
parent_tuning_job_name = (
step_tuning.properties.HyperParameterTuningJobName
)  # Use the parent tuning job specific to the use case
# IDENTICAL_DATA_AND_ALGORITHM: same data and algorithm as the parent job(s).
warm_start_config = WarmStartConfig(
WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM, parents={parent_tuning_job_name}
)
# Same tuner configuration as above, but seeded with the parent's results.
tuner_log_warm_start = HyperparameterTuner(
xgb_train,
objective_metric_name,
hyperparameter_ranges,
max_jobs=3,
max_parallel_jobs=3,
strategy="Random",
objective_type="Minimize",
warm_start_config=warm_start_config,
)
# NOTE: this step is intentionally NOT added to the pipeline's `steps` list below.
step_tuning_warm_start = TuningStep(
name="HPTuningWarmStart",
tuner=tuner_log_warm_start,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"validation"
].S3Output.S3Uri,
content_type="text/csv",
),
},
cache_config=cache_config,
)
```
#### Creating and Registering the best models
After successfully completing the Hyperparameter Tuning job. You can either create SageMaker models from the model artifacts created by the training jobs from the TuningStep or register the models into the Model Registry.
When using the model Registry, if you register multiple models from the TuningStep, they will be registered as versions within the same model package group unless unique model package groups are specified for each RegisterModelStep that is part of the pipeline.
In this example, the two best models from the TuningStep are added to the same model package group in the Model Registry as v0 and v1.
You use the `get_top_model_s3_uri` method of the TuningStep class to get the model artifact from one of the top performing model versions
```
# Creating 2 SageMaker Models
model_bucket_key = f"{default_bucket}/{base_job_prefix}/AbaloneTrain"
# top_k=0 resolves to the artifact of the best-ranked training job of the tuning step.
best_model = Model(
image_uri=image_uri,
model_data=step_tuning.get_top_model_s3_uri(top_k=0, s3_bucket=model_bucket_key),
sagemaker_session=sagemaker_session,
role=role,
predictor_cls=XGBoostPredictor,
)
step_create_first = CreateModelStep(
name="CreateTopModel",
model=best_model,
inputs=sagemaker.inputs.CreateModelInput(instance_type="ml.m4.large"),
)
# top_k=1 resolves to the runner-up training job's artifact.
second_best_model = Model(
image_uri=image_uri,
model_data=step_tuning.get_top_model_s3_uri(top_k=1, s3_bucket=model_bucket_key),
sagemaker_session=sagemaker_session,
role=role,
predictor_cls=XGBoostPredictor,
)
step_create_second = CreateModelStep(
name="CreateSecondBestModel",
model=second_best_model,
inputs=sagemaker.inputs.CreateModelInput(instance_type="ml.m4.large"),
)
```
#### Evaluate the top model
Use a processing job to evaluate the top model from the tuning step
```
%%writefile evaluate.py
"""Evaluation script for measuring mean squared error."""
import json
import logging
import pathlib
import pickle
import tarfile
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import mean_squared_error
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == "__main__":
logger.debug("Starting evaluation.")
model_path = "/opt/ml/processing/model/model.tar.gz"
with tarfile.open(model_path) as tar:
tar.extractall(path=".")
logger.debug("Loading xgboost model.")
model = pickle.load(open("xgboost-model", "rb"))
logger.debug("Reading test data.")
test_path = "/opt/ml/processing/test/test.csv"
df = pd.read_csv(test_path, header=None)
logger.debug("Reading test data.")
y_test = df.iloc[:, 0].to_numpy()
df.drop(df.columns[0], axis=1, inplace=True)
X_test = xgboost.DMatrix(df.values)
logger.info("Performing predictions against test data.")
predictions = model.predict(X_test)
logger.debug("Calculating mean squared error.")
mse = mean_squared_error(y_test, predictions)
std = np.std(y_test - predictions)
report_dict = {
"regression_metrics": {
"mse": {"value": mse, "standard_deviation": std},
},
}
output_dir = "/opt/ml/processing/evaluation"
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
logger.info("Writing out evaluation report with mse: %f", mse)
evaluation_path = f"{output_dir}/evaluation.json"
with open(evaluation_path, "w") as f:
f.write(json.dumps(report_dict))
# A ProcessingStep is used to evaluate the performance of a selected model from the HPO step. In this case, the top performing model
# is evaluated. Based on the results of the evaluation, the model is registered into the Model Registry using a ConditionStep.
# Reuse the XGBoost image so evaluate.py can load the model artifact.
script_eval = ScriptProcessor(
image_uri=image_uri,
command=["python3"],
instance_type=processing_instance_type,
instance_count=1,
base_job_name=f"{base_job_prefix}/script-tuning-step-eval",
sagemaker_session=sagemaker_session,
role=role,
)
# PropertyFile lets later steps read values out of evaluation.json via JsonGet.
evaluation_report = PropertyFile(
name="BestTuningModelEvaluationReport",
output_name="evaluation",
path="evaluation.json",
)
# This can be extended to evaluate multiple models from the HPO step
step_eval = ProcessingStep(
name="EvaluateTopModel",
processor=script_eval,
inputs=[
ProcessingInput(
source=step_tuning.get_top_model_s3_uri(top_k=0, s3_bucket=model_bucket_key),
destination="/opt/ml/processing/model",
),
ProcessingInput(
source=step_process.properties.ProcessingOutputConfig.Outputs["test"].S3Output.S3Uri,
destination="/opt/ml/processing/test",
),
],
outputs=[
ProcessingOutput(output_name="evaluation", source="/opt/ml/processing/evaluation"),
],
code="evaluate.py",
property_files=[evaluation_report],
cache_config=cache_config,
)
# Attach the evaluation report to the registered model package as model metrics.
model_metrics = ModelMetrics(
model_statistics=MetricsSource(
s3_uri="{}/evaluation.json".format(
step_eval.arguments["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
),
content_type="application/json",
)
)
# Register the model in the Model Registry
# Multiple models can be registered into the Model Registry using multiple RegisterModel steps. These models can either be added to the
# same model package group as different versions within the group or the models can be added to different model package groups.
step_register_best = RegisterModel(
name="RegisterBestAbaloneModel",
estimator=xgb_train,
model_data=step_tuning.get_top_model_s3_uri(top_k=0, s3_bucket=model_bucket_key),
content_types=["text/csv"],
response_types=["text/csv"],
inference_instances=["ml.t2.medium", "ml.m5.large"],
transform_instances=["ml.m5.large"],
model_package_group_name=model_package_group_name,
approval_status=model_approval_status,
)
# condition step for evaluating model quality and branching execution
# Register only when test MSE <= 6.0, read from the evaluation report.
cond_lte = ConditionLessThanOrEqualTo(
left=JsonGet(
step_name=step_eval.name,
property_file=evaluation_report,
json_path="regression_metrics.mse.value",
),
right=6.0,
)
step_cond = ConditionStep(
name="CheckMSEAbaloneEvaluation",
conditions=[cond_lte],
if_steps=[step_register_best],
else_steps=[],
)
# Assemble the pipeline; step order here is declarative — SageMaker derives
# the execution DAG from the data dependencies between steps.
pipeline = Pipeline(
name="tuning-step-pipeline",
parameters=[
processing_instance_type,
processing_instance_count,
training_instance_type,
input_data,
model_approval_status,
],
steps=[
step_process,
step_tuning,
step_create_first,
step_create_second,
step_eval,
step_cond,
],
sagemaker_session=sagemaker_session,
)
```
#### Execute the Pipeline
```
import json

# Inspect the generated pipeline definition before submitting it.
definition = json.loads(pipeline.definition())
definition
# Create or update the pipeline in the account, then kick off one execution.
pipeline.upsert(role_arn=role)
pipeline.start()
```
#### Cleaning up resources
Users are responsible for cleaning up resources created when running this notebook. Specify the ModelName, ModelPackageName, and ModelPackageGroupName that need to be deleted. The model names are generated by the CreateModel step of the Pipeline, and the property values are available only in the Pipeline context. To delete the models created by this pipeline, navigate to the Model Registry in the SageMaker Console to find the models to delete.
```
# # Create a SageMaker client
# sm_client = boto3.client("sagemaker")
# # Delete SageMaker Models
# sm_client.delete_model(ModelName="...")
# # Delete Model Packages
# sm_client.delete_model_package(ModelPackageName="...")
# # Delete the Model Package Group
# sm_client.delete_model_package_group(ModelPackageGroupName="...")
# # Delete the Pipeline
# sm_client.delete_pipeline(PipelineName="tuning-step-pipeline")
```
| github_jupyter |
# Ensembles
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor

# Use the seaborn default look for all plots below.
sns.set_theme()
# Fixed-seed generator shared by the sampling here and by randomize() below.
rng = np.random.default_rng(42)
# 150 one-dimensional inputs in [0, 10): first 100 train, last 50 test.
x = rng.uniform(size=(150, 1), low=0.0, high=10.0)
x_train, x_test = x[:100], x[100:]
# Dense grid used for plotting fitted curves.
x_plot = np.linspace(0, 10, 500).reshape(-1, 1)
def lin(x):
    """Ground-truth linear target: slope 0.85, intercept -1.5."""
    return x * 0.85 - 1.5
def fun(x):
    """Nonlinear ground truth: a sine wave plus a mild quadratic trend, shifted down."""
    wave = 2 * np.sin(x)
    trend = 0.1 * x ** 2
    return wave + trend - 2
def randomize(fun, x, scale=0.5):
    """Evaluate ``fun`` at ``x`` and add Gaussian noise of the given scale."""
    noisy = fun(x) + rng.normal(scale=scale, size=x.shape)
    return noisy
def evaluate_non_random_regressor(reg_type, f_y, *args, **kwargs):
    """Fit one regressor on noise-free targets, plot the fit, print MAE/MSE/RMSE.

    Uses the module-level x_train/x_test samples; returns the fitted regressor.
    """
    model = reg_type(*args, **kwargs)
    targets_train = f_y(x_train).reshape(-1)
    targets_test = f_y(x_test).reshape(-1)
    model.fit(x_train, targets_train)
    predictions = model.predict(x_test)
    grid = np.linspace(0, 10, 500).reshape(-1, 1)
    fig, ax = plt.subplots(figsize=(20, 8))
    # Prediction curve, true curve, then training points — this call order keeps
    # the default seaborn colour assignment stable.
    sns.lineplot(x=grid[:, 0], y=model.predict(grid), ax=ax)
    sns.lineplot(x=grid[:, 0], y=f_y(grid[:, 0]), ax=ax)
    sns.scatterplot(x=x_train[:, 0], y=targets_train, ax=ax)
    plt.show()
    mae = mean_absolute_error(targets_test, predictions)
    mse = mean_squared_error(targets_test, predictions)
    rmse = np.sqrt(mse)
    print(f"\nNo randomness: MAE = {mae:.2f}, MSE = {mse:.2f}, RMSE = {rmse:.2f}")
    return model
def plot_graphs(f_y, reg, reg_rand, reg_chaos, y_train, y_rand_train, y_chaos_train):
    """Overlay the three fitted models, their training points, and the true curve."""
    grid = np.linspace(0, 10, 500).reshape(-1, 1)
    fig, ax = plt.subplots(figsize=(20, 12))
    # One line + scatter pair per model, in the original order so seaborn's
    # default colour cycling is unchanged.
    for model, targets in ((reg, y_train), (reg_rand, y_rand_train), (reg_chaos, y_chaos_train)):
        sns.lineplot(x=grid[:, 0], y=model.predict(grid), ax=ax)
        sns.scatterplot(x=x_train[:, 0], y=targets, ax=ax)
    # Finish with the ground-truth curve.
    sns.lineplot(x=grid[:, 0], y=f_y(grid[:, 0]), ax=ax)
    plt.show()
def print_evaluation(y_test, y_pred, y_rand_test, y_rand_pred, y_chaos_test, y_chaos_pred):
    """Print MAE/MSE/RMSE for the noise-free, noisy, and very noisy test sets."""
    cases = (
        ("\nNo randomness: ", y_test, y_pred),
        ("Some randomness: ", y_rand_test, y_rand_pred),
        ("Lots of randomness: ", y_chaos_test, y_chaos_pred),
    )
    for label, actual, predicted in cases:
        mae = mean_absolute_error(actual, predicted)
        mse = mean_squared_error(actual, predicted)
        rmse = np.sqrt(mean_squared_error(actual, predicted))
        print(f"{label}MAE = {mae:.2f}, MSE = {mse:.2f}, RMSE = {rmse:.2f}")
def evaluate_regressor(reg_type, f_y, *args, **kwargs):
    # Fit three fresh regressors of the same type on identical inputs but with
    # increasing label noise (none, scale 0.5, scale 1.5), then plot the fits
    # and print MAE/MSE/RMSE for each.
    # NOTE: statement order is significant — every randomize() call advances
    # the shared module-level `rng`, so reordering these lines would change
    # the generated datasets.
    reg = reg_type(*args, **kwargs)
    reg_rand = reg_type(*args, **kwargs)
    reg_chaos = reg_type(*args, **kwargs)
    # Noise-free baseline.
    y_train = f_y(x_train).reshape(-1)
    y_test = f_y(x_test).reshape(-1)
    y_pred = reg.fit(x_train, y_train).predict(x_test)
    # Moderate noise (default scale 0.5).
    y_rand_train = randomize(f_y, x_train).reshape(-1)
    y_rand_test = randomize(f_y, x_test).reshape(-1)
    y_rand_pred = reg_rand.fit(x_train, y_rand_train).predict(x_test)
    # Heavy noise (scale 1.5).
    y_chaos_train = randomize(f_y, x_train, 1.5).reshape(-1)
    y_chaos_test = randomize(f_y, x_test, 1.5).reshape(-1)
    y_chaos_pred = reg_chaos.fit(x_train, y_chaos_train).predict(x_test)
    plot_graphs(f_y, reg, reg_rand, reg_chaos, y_train, y_rand_train, y_chaos_train)
    print_evaluation(y_test, y_pred, y_rand_test, y_rand_pred, y_chaos_test, y_chaos_pred)
```
# Ensembles, Random Forests, Gradient Boosted Trees
## Ensemble Methods
Idea: combine several estimators to improve their overall performance.
- Averaging methods:
- Independent estimators, average predictions
- Reduces variance (overfitting)
- Bagging, random forests
- Boosting methods:
- Train estimators sequentially
- Each estimator is trained to reduce the bias of its (combined) predecessors
### Bagging
- Averaging method: build several estimators of the same type, average their results
- Needs some way to introduce differences between estimators
- Otherwise variance is not reduced
- Train on random subsets of the training data
- Reduce overfitting
- Work best with strong estimators (e.g., decision trees with (moderately) large depth)
### Random Forests
- Bagging classifier/regressor using decision trees
- For each tree in the forest:
- Subset of training data
- Subset of features
- Often significant reduction in variance (overfitting)
- Sometimes increase in bias
```
from sklearn.ensemble import RandomForestRegressor

# Random forests on the noise-free linear and nonlinear targets.
evaluate_non_random_regressor(RandomForestRegressor, lin, random_state=42);
evaluate_non_random_regressor(RandomForestRegressor, fun, random_state=42);
# Same, with more trees and MAE as the split criterion.
evaluate_non_random_regressor(
RandomForestRegressor, fun, n_estimators=25, criterion="absolute_error", random_state=42
);
# Now with noisy targets: default forest, then a shallow forest, then one with
# larger leaves — both ways to limit tree complexity and reduce overfitting.
evaluate_regressor(RandomForestRegressor, lin, random_state=42);
evaluate_regressor(
RandomForestRegressor, lin, n_estimators=500, max_depth=3, random_state=42
)
evaluate_regressor(
RandomForestRegressor, lin, n_estimators=500, min_samples_leaf=6, random_state=42
)
# Repeat for the nonlinear target; n_jobs=-1 uses all CPU cores.
evaluate_regressor(RandomForestRegressor, fun, random_state=42)
evaluate_regressor(
RandomForestRegressor,
fun,
n_estimators=1000,
min_samples_leaf=6,
random_state=43,
n_jobs=-1,
)
```
## Gradient Boosted Trees
- Boosting method for both regression and classification
- Requires differentiable loss function
```
from sklearn.ensemble import GradientBoostingRegressor

# Gradient boosting on the noise-free targets.
evaluate_non_random_regressor(GradientBoostingRegressor, lin);
evaluate_non_random_regressor(GradientBoostingRegressor, fun);
# Noisy targets: defaults, then smaller learning rates with MAE loss, and
# finally heavy subsampling (stochastic gradient boosting).
evaluate_regressor(GradientBoostingRegressor, lin);
evaluate_regressor(GradientBoostingRegressor, lin, n_estimators=200, learning_rate=0.05, loss="absolute_error");
evaluate_regressor(GradientBoostingRegressor, lin, n_estimators=500, learning_rate=0.01,
loss="absolute_error", subsample=0.1, random_state=46);
evaluate_regressor(GradientBoostingRegressor, fun, n_estimators=500, learning_rate=0.01,
loss="absolute_error", subsample=0.1, random_state=44);
```
### Multiple Features
```
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
np.set_printoptions(precision=1)
# Small synthetic problem: 4 features, only 1 informative.
x, y, coef = make_regression(n_samples=250, n_features=4, n_informative=1, coef=True, random_state=42)
x.shape, y.shape, coef
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))
for i, ax in enumerate(axs.reshape(-1)):
    sns.scatterplot(x=x[:, i], y=y, ax=ax)
# Harder problem: 20 features, 10 informative.
x, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, coef=True, random_state=42)
x.shape, y.shape, coef
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))
for i in range(2):
    sns.scatterplot(x=x[:, i], y=y, ax=axs[0, i]);
for i in range(2):
    sns.scatterplot(x=x[:, i + 6], y=y, ax=axs[1, i]);
# Compare the four regressor families on the noise-free data (MAE, MSE).
lr_clf = LinearRegression()
lr_clf.fit(x_train, y_train)
y_lr_pred = lr_clf.predict(x_test)
mean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)
# Recovered linear coefficients vs. the true generating coefficients.
lr_clf.coef_.astype(np.int32), coef.astype(np.int32)
dt_clf = DecisionTreeRegressor()
dt_clf.fit(x_train, y_train)
y_dt_pred = dt_clf.predict(x_test)
mean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)
rf_clf = RandomForestRegressor()
rf_clf.fit(x_train, y_train)
y_rf_pred = rf_clf.predict(x_test)
mean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)
gb_clf = GradientBoostingRegressor()
gb_clf.fit(x_train, y_train)
y_gb_pred = gb_clf.predict(x_test)
mean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)
# Same comparison with observation noise added by make_regression.
x, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, noise=100.0, coef=True, random_state=42)
x.shape, y.shape, coef
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
lr_clf = LinearRegression()
lr_clf.fit(x_train, y_train)
y_lr_pred = lr_clf.predict(x_test)
mean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)
dt_clf = DecisionTreeRegressor()
dt_clf.fit(x_train, y_train)
y_dt_pred = dt_clf.predict(x_test)
mean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)
rf_clf = RandomForestRegressor()
rf_clf.fit(x_train, y_train)
y_rf_pred = rf_clf.predict(x_test)
mean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)
gb_clf = GradientBoostingRegressor()
gb_clf.fit(x_train, y_train)
y_gb_pred = gb_clf.predict(x_test)
mean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)
# Add a strong non-linear dependence on feature 1 on top of the noisy target.
x, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, noise=100.0,
                             coef=True, random_state=42)
y += (20 * x[:, 1]) ** 2
x.shape, y.shape, coef
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))
for i in range(2):
    sns.scatterplot(x=x[:, i], y=y, ax=axs[0, i]);
for i in range(2):
    sns.scatterplot(x=x[:, i + 6], y=y, ax=axs[1, i]);
# With the non-linearity, the tree ensembles should now beat the linear model.
lr_clf = LinearRegression()
lr_clf.fit(x_train, y_train)
y_lr_pred = lr_clf.predict(x_test)
mean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)
dt_clf = DecisionTreeRegressor()
dt_clf.fit(x_train, y_train)
y_dt_pred = dt_clf.predict(x_test)
mean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)
rf_clf = RandomForestRegressor()
rf_clf.fit(x_train, y_train)
y_rf_pred = rf_clf.predict(x_test)
mean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)
gb_clf = GradientBoostingRegressor()
gb_clf.fit(x_train, y_train)
y_gb_pred = gb_clf.predict(x_test)
mean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)
```
## Feature Engineering
```
# 150 random inputs in [0, 10]; 100 for training, 50 for testing.
x = rng.uniform(size=(150, 1), low=0.0, high=10.0)
x_train, x_test = x[:100], x[100:]
x_plot = np.linspace(0, 10, 500)
x_train[:3]
y_lin_train = lin(x_train).reshape(-1)
y_lin_test = lin(x_test).reshape(-1)
# NOTE(review): asymmetric with the neighboring lines -- here the input is
# flattened *before* calling fun, elsewhere the result is flattened *after*.
# Presumably equivalent for an elementwise fun; confirm.
y_fun_train = fun(x_train.reshape(-1))
y_fun_test = fun(x_test).reshape(-1)
x_squares = x * x
x_squares[:3]
x_sins = np.sin(x)
x_sins[:3]
# Augmented design matrix: columns [x, x^2, sin(x)].
x_train_aug = np.concatenate([x_train, x_train * x_train, np.sin(x_train)], axis=1)
x_train_aug[:3]
x_test_aug = np.concatenate([x_test, x_test * x_test, np.sin(x_test)], axis=1)
# from sklearn.linear_model import Ridge
# lr_aug_lin = Ridge()
lr_aug_lin = LinearRegression()
lr_aug_lin.fit(x_train_aug, y_lin_train);
lr_aug_lin.coef_, lr_aug_lin.intercept_
y_aug_lin_pred = lr_aug_lin.predict(x_test_aug)
mean_absolute_error(y_lin_test, y_aug_lin_pred), mean_squared_error(
    y_lin_test, y_aug_lin_pred
)
x_test.shape, x_plot.shape
def train_and_plot_aug(f_y, scale=0.5):
    """Fit a linear model on the augmented features [x, x^2, sin(x)] of noisy
    samples of f_y, plot truth vs. prediction vs. training data, and print
    MAE/MSE/RMSE against both the noisy test targets and the noise-free truth.

    Parameters:
        f_y: vectorized target function, evaluated on 1-d / (n, 1) arrays.
        scale: noise amplitude passed to `randomize` (0.0 disables noise).

    Uses the module-level x_plot, x_train_aug, x_test and plotting globals.
    """
    y_plot = f_y(x_plot)
    f_r = lambda x: randomize(f_y, x, scale=scale)
    y_train = f_r(x_train_aug[:, 0])
    y_test = f_r(x_test)
    lr_aug = LinearRegression()  # Try with Ridge() as well...
    lr_aug.fit(x_train_aug, y_train)
    y_pred_test = lr_aug.predict(
        np.concatenate([x_test, x_test * x_test, np.sin(x_test)], axis=1)
    )
    x_plot2 = x_plot.reshape(-1, 1)
    y_pred_plot = lr_aug.predict(
        np.concatenate([x_plot2, x_plot2 * x_plot2, np.sin(x_plot2)], axis=1)
    )
    # Orange: true curve, red: model prediction, green: noisy training points.
    fig, ax = plt.subplots(figsize=(12, 6))
    sns.scatterplot(x=x_plot2[:, 0], y=y_plot, color="orange")
    sns.scatterplot(x=x_plot2[:, 0], y=y_pred_plot, color="red")
    sns.scatterplot(x=x_train_aug[:, 0], y=y_train, color="green")
    plt.show()
    mae_in = mean_absolute_error(y_test, y_pred_test)
    # BUG FIX: the original computed mse_in with mean_absolute_error, so the
    # reported MSE and RMSE were actually MAE and sqrt(MAE).
    mse_in = mean_squared_error(y_test, y_pred_test)
    rmse_in = np.sqrt(mse_in)
    y_nr = f_y(x_test)
    mae_true = mean_absolute_error(y_nr, y_pred_test)
    # BUG FIX: same copy-paste error for the noise-free comparison.
    mse_true = mean_squared_error(y_nr, y_pred_test)
    rmse_true = np.sqrt(mse_true)
    print(f"Vs. input: MAE: {mae_in:.2f}, MSE: {mse_in:.2f}, RMSE: {rmse_in:.2f}")
    print(f"True: MAE: {mae_true:.2f}, MSE: {mse_true:.2f}, RMSE: {rmse_true:.2f}")
    print(f"Parameters: {lr_aug.coef_}, {lr_aug.intercept_}")
# Demonstrate the augmented-feature fit on both targets with increasing noise.
train_and_plot_aug(lin)
train_and_plot_aug(fun, scale=0.0)
train_and_plot_aug(fun, scale=0.5)
train_and_plot_aug(fun, scale=1.5)
train_and_plot_aug(fun, scale=3)
def fun2(x):
    """Target mixing a sinusoid (2.8*sin x) with the quadratic 0.08*x^2 + 0.3*x - 2.5."""
    polynomial_part = 0.08 * x ** 2 + 0.3 * x - 2.5
    return 2.8 * np.sin(x) + polynomial_part
train_and_plot_aug(fun2, scale=1.5)
# Step target: piecewise constant, -0.5 where x <= 6 and 3.5 where x > 6.
train_and_plot_aug(lambda x: np.select([x<=6, x>6], [-0.5, 3.5]))
```
| github_jupyter |
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to being happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
# Load the "Happy" dataset (helper from kt_utils) and scale pixels to [0, 1].
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
# Labels arrive as (1, m); transpose to (m, 1) to match the Keras model output.
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
    """Example: build a Keras model with the functional API.

    input_shape -- shape of one input image, excluding the batch dimension.
    Returns a keras.models.Model instance (CONV -> BN -> RELU -> MAXPOOL -> FC).
    """
    # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
    X_input = Input(input_shape)
    # Zero-Padding: pads the border of X_input with zeroes
    X = ZeroPadding2D((3, 3))(X_input)
    # CONV -> BN -> RELU Block applied to X
    X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
    X = BatchNormalization(axis = 3, name = 'bn0')(X)
    X = Activation('relu')(X)
    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
    model = Model(inputs = X_input, outputs = X, name='HappyModel')
    return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.
    Arguments:
    input_shape -- shape of the images of the dataset (excluding batch size)
    Returns:
    model -- a Model() instance in Keras
    """
    ### START CODE HERE ###
    # Feel free to use the suggested outline in the text above to get started, and run through the whole
    # exercise (including the later portions of this notebook) once. Then come back and try out other
    # network architectures as well.
    X_input = Input(shape=input_shape)
    # Pad 3 pixels on each border before the 7x7 convolution.
    X = ZeroPadding2D((3,3))(X_input)
    # Conv -> BN -> ReLU
    X = Conv2D(32, (7, 7), strides=(1,1), name='conv0')(X)
    # axis=3 normalizes over the channels dimension ('channels_last' format).
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    # Max-pool
    X = MaxPooling2D((2,2), name='max_pool')(X)
    # Flatten and classify with one sigmoid unit (happy / not happy).
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    model = Model(X_input, X, name='HappyModel')
    ### END CODE HERE ###
    return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
**Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
# Step 1: create the model for 64x64 RGB inputs.
happyModel = HappyModel((64,64,3))
### END CODE HERE ###
```
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
# Step 2: binary classification, so sigmoid output + binary cross-entropy.
happyModel.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
### END CODE HERE ###
```
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
# Step 3: train; more epochs / smaller batches improve accuracy further.
happyModel.fit(X_train,
               Y_train,
               epochs=10,
               batch_size=32)
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
**Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
# Step 4: evaluate returns [loss, accuracy] per the compile() metrics.
preds = happyModel.evaluate(X_test, Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets. To pass this assignment, you have to get at least 75% accuracy.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved 75% accuracy, here're some things you can play around with to try to achieve it:
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
Even if you have achieved 75% accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
# Load, resize to the model's 64x64 input, add a batch dimension, and predict.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# NOTE(review): preprocess_input applies ImageNet-style preprocessing, but the
# model was trained on images scaled by 1/255 (see the normalization cell
# above) -- predictions on new images may be off; confirm.
x = preprocess_input(x)
print(happyModel.predict(x))
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
# Inspect the layer table, save the graph as a PNG, and render it inline.
happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
```
| github_jupyter |
```
%matplotlib inline
import lsqfit
from model_avg_paper import *
from model_avg_paper.test_tmin import test_vary_tmin_SE
# True parameters of the synthetic two-state model (A*/E* are presumably the
# amplitudes/energies used by multi_exp_model -- confirm in model_avg_paper).
p0_test_ME = {
    'A0': 2.0,
    'E0': 0.8,
    'A1': 10.4,
    'E1': 1.16,
}
Nt = 32
noise_params = {
    'noise_amp': 0.3,
    'noise_samples': 500,
    'frac_noise': True,
    'cross_val': False,
    'cv_frac': 0.1,
}
obs_name='E0'
correlated_data = True
rho=0.6
# Set seed for consistency of outcome
#np.random.seed(10911) # Fig 3, subfig A; Fig 4
#np.random.seed(81890) # Fig 3, subfig B
#np.random.seed(87414) # Fig 3, subfig C
np.random.seed(77700) # Fig 3, subfig D
def ME_model(x,p):
    # Wrapper fixing the number of exponentials at 2.
    return multi_exp_model(x,p,Nexc=2)
# Generate synthetic data: correlated in time (strength rho) or uncorrelated.
if correlated_data:
    test_data = gen_synth_data_corr(
        np.arange(0,Nt),
        p0_test_ME,
        ME_model,
        rho=rho,
        **noise_params)
else:
    test_data = gen_synth_data(
        np.arange(0,Nt),
        p0_test_ME,
        ME_model,
        **noise_params)
# Scan over t_min and model-average the E0 estimates using the AIC.
test_res = test_vary_tmin_SE(test_data, Nt=Nt, max_tmin=26, obs_name=obs_name, IC='AIC',
                             cross_val=noise_params['cross_val'])
print(test_res['obs_avg'])
## Figure 3
# Top panel: E0 from the individual fits vs. the model average and the truth.
# Bottom panel: normalized model probabilities and fit p-values vs. t_min.
import matplotlib.ticker as ticker
gs = plt.GridSpec(2, 1, height_ratios=[3,1])
gs.update(hspace=0.06)
ax1 = plt.subplot(gs[0])
plot_gvcorr([test_res['obs_avg']], x=np.array([1.5]), color='red', markersize=7, marker='s', open_symbol=True, label='Model avg.')
plot_gvcorr(test_res['obs'], x=test_res['tmin'], label='Individual fits')
ax1.plot(np.array([-1,34]), 0*np.array([0,0])+p0_test_ME[obs_name], linestyle='--', color='k', label='Model truth')
#ax1.set_xlabel('$N_p$')
ax1.set_ylabel('$E_0$')
ax1.legend(loc='center left', bbox_to_anchor=(1,0.5))
ax1.set_xlim(0.7,27.3)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(gs[1])
# Normalize so both curves integrate to 1 over the scanned t_min range.
p_norm = test_res['probs'] / np.sum(test_res['probs'])
Q_norm = test_res['Qs'] / np.sum(test_res['Qs'])
plt.plot(test_res['tmin'], p_norm, color='orange', label='pr$(M|D)$')
plt.plot(test_res['tmin'], Q_norm, color='blue', linestyle='-.', label='Fit p-value') # Note: fit prob != model prob!
tick_spacing = 4
ax2.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
plt.yticks([0,np.max(p_norm)])
ax2.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '0' if x == 0 else '{:.2f}'.format(x)))
ax2.set_xlim(0.7,27.3)
# Put a legend to the right of the current axis
ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax2.set_xlabel(r'$t_{\rm min}$')
ax2.set_ylabel('p')
# Uncomment to save figure to disk
#plt.savefig('plots/exp_avg_4.pdf', bbox_inches = "tight")
# Scaling w/number of samples
# Generate one large ensemble, then study how each estimator behaves as the
# number of retained samples N_s grows.
Nsamp_array = np.array([20, 40, 80, 160, 320, 640, 2040, 4096, 4096*2, 4096*4])
Nsamp_max = Nsamp_array[-1]
noise_params['noise_samples'] = Nsamp_max
if correlated_data:
    scale_data = gen_synth_data_corr(
        np.arange(0,Nt),
        p0_test_ME,
        ME_model,
        rho=rho,
        **noise_params)
else:
    scale_data = gen_synth_data(
        np.arange(0,Nt),
        p0_test_ME,
        ME_model,
        **noise_params)
model_avg_vs_Nsamp = []
naive_avg_vs_Nsamp = []
fixed_tmin_vs_Nsamp = []
fixed_tmin_2_vs_Nsamp = []
fw_vs_Nsamp = []
fix_tmin = 14
fix_tmin_2 = 8
for Nsamp in Nsamp_array:
    # Truncate the full ensemble to Nsamp samples and redo the t_min scan
    # with both the AIC and the naive information criterion.
    test_data_scale = cut_synth_data_Nsamp(scale_data, Nsamp)
    test_res_scale = test_vary_tmin_SE(test_data_scale, Nt=Nt, max_tmin=Nt-4, obs_name=obs_name, IC='AIC')
    test_res_scale_naive = test_vary_tmin_SE(test_data_scale, Nt=Nt, max_tmin=Nt-4, obs_name=obs_name,
                                             IC='naive')
    model_avg_vs_Nsamp.append(test_res_scale['obs_avg'])
    naive_avg_vs_Nsamp.append(test_res_scale_naive['obs_avg'])
    fixed_tmin_vs_Nsamp.append(test_res_scale['obs'][fix_tmin])
    fixed_tmin_2_vs_Nsamp.append(test_res_scale['obs'][fix_tmin_2])
    fw_vs_Nsamp.append(obs_avg_full_width(test_res_scale['obs'], test_res_scale['Qs'], test_res_scale['fits'], bf_i=None))
## Figure 4
# E0 vs. log(N_s) for each estimator; small horizontal offsets (+0.1 ... +0.4)
# keep the markers from overlapping.
plot_gvcorr(model_avg_vs_Nsamp, x=np.log(Nsamp_array)+0.1, label='Model avg. (AIC)')
plot_gvcorr(fixed_tmin_vs_Nsamp, x=np.log(Nsamp_array)+0.2, color='red', marker='s', markersize=6, label=r'Fixed $t_{\rm min} = 14$')
plot_gvcorr(fw_vs_Nsamp, x=np.log(Nsamp_array)+0.3, marker='X', markersize=8, color='orange', label='Full-width systematic')
plot_gvcorr(naive_avg_vs_Nsamp, x=np.log(Nsamp_array)+0.4, color='silver', marker='v', markersize=8, label=r'Model avg. (naive)')
plt.plot(np.arange(0,10), 0*np.arange(0,10)+p0_test_ME[obs_name], linestyle='--', color='k', label='Model truth')
plt.xlabel(r'$\log(N_s)$')
plt.ylabel(r'$E_0$')
plt.xlim(2.7,7.)
plt.ylim(0.78,0.82)
# Put a legend to the right of the current axis
ax = plt.subplot(111)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Uncomment to save figure to disk
#plt.savefig('plots/exp_N_scaling.pdf', bbox_inches = "tight")
```
| github_jupyter |
### Classes
Finally we get to classes.
I assume you already have some knowledge of classes and OOP in general, so I'll focus on the semantics of creating classes and some of the differences with Java classes.
First, the question of visibility. There is no such thing as private or public in Python. Everything is public, period. So we don't have to specify the visibility of functions and attributes in Python.
Class instantiations are done in two steps - the instance is created first, and then the instance is initialized. In general we hook into the initialization phase and leave the object creation alone. We do this by using a special method in the class called `__init__`. We'll see how large a role special methods play in Python.
The important thing to note is that by the time `__init__` is called in our class, the object (instance) has **already** been created.
We use the `class` keyword to create classes:
```
class Person:
    # `pass` is a no-op that makes the otherwise-empty class body valid.
    pass
```
`pass` is something we can use in Python to indicate "do nothing" (a so called "no-op" operation). Here I use it to supply a body for the class definition, but don't actually want to specify any functionality.
So now we can actually create instances of the `Person` object at this point. They will be pretty useless since we have not implemented any functionality yet.
We create instances of classes by **calling** the class - remember how we call things in Python, we use `()`:
```
# Instantiate by calling the class; inspect the object's identity and type.
p = Person()
id(p), type(p)
```
So now we may want to add some functions to the class. Whenever we define a function in a class, we have to understand what happens when we call that function from an instance, using dot notation:
For example, if we write
```p.say_hello()```
then we are calling the `say_hello()` function from the instance, and Python will **bind** that function to the specific instance - i.e. it creates an association between the instance used to call the function, and the function.
The way it does this is by passing in the instance reference to the function as the first positional argument - in this case it would actually call `say_hello(p)`. And our `say_hello` function now has access to the instance it was called from, including any internal state.
When functions are bound to an instance, they are called **methods** of the class, and, in particular **instance methods** because they are bound to instances of the class when called. (There are other types of functions that can be bound to the class, called *class methods*, but this is beyond the scope of this primer).
Let's see how this works:
```
class Person:
    def say_hello(instance, name):
        # `instance` receives the object the method was called on
        # (the argument conventionally named `self` -- see the text below).
        return f'{instance} says hello to {name}'
```
So here, we had to make sure the first argument of our function was created to receive the instance it is being called from. After that we are free to add our own arguments as needed.
```
p = Person()
```
Let's see what `p` looks like when we print it:
```
p
```
And now let's call the `say_hello` method from the instance `p`:
```
p.say_hello('Alex')
```
You'll notice that we did not pass `p` as the first argument to `say_hello` - Python did that for us since we wrote `p.say_hello`.
By convention, that `instance` argument I wrote above, is usually named `self`, for obvious reasons. But it is just a convention, although one you should stick to.
```
class Person:
    def say_hello(self, name):
        # `self` is bound automatically when calling p.say_hello(name).
        return f'{self} says hello to {name}'
p = Person()
p.say_hello('Alex')
```
So now let's turn our attention to instance attributes.
Python is dynamic, so instance attributes do not have to be defined at the time the class is created. In fact we rarely do so.
Let's see how we can add an attribute to an instance after it's been created:
```
p = Person()
p.name = 'Alex'
```
That's it, `p` now has an attribute called `name`:
```
p.name
```
But this is specific to this instance, not other instances:
```
p2 = Person()
p2.name
```
So instance attributes are **specific** to the instance (hence the name).
Of course we can define attributes by calling methods in our class - let's see an example of this:
```
class Person:
    # Java-style accessor pair shown for illustration; the attribute itself
    # remains directly accessible as p.name (nothing is private in Python).
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
p = Person()
```
At this point `p` does **not** have a `name` attribute (it hasn't been set yet!):
```
p.get_name()
```
But we can easily set it using the `set_name` method:
```
p.set_name('Alex')
p.get_name()
```
And of course the attribute is called `name` and is easily accessible directly as well:
```
p.name
```
This is what is called a *bare* attribute - it is not hidden by getter and setter methods like we would normally do in Java (remember we do not have private variables).
You'll notice the issue we had - we would get an exception if we tried to access the attribute before it was actually created.
For this reason, best practice is to create these instance attributes (even setting them to a default value or `None`) when the class instance is being created.
The best place to do this is in the *initialization* phase of the class (remember that class instantiation has two phases - creation and initialization).
To do this we use the special method `__init__`.
This is going to be a function in our class, and will be bound to the instance when it is called (by that time the instance has already been created), so just like our `set_name` method, we'll need to allow for the instance to be received as the first argument:
```
class Person:
    def __init__(self, name):
        # Initializer: called right after the instance is created;
        # stores `name` as an instance attribute.
        self.name = name
```
So the `__init__` method is basically doing the same thing as our `set_name` method - the difference is in how it is called.
When we create an instance using `Person()`, Python looks for, and if available calls, the `__init__` method (that's why it's called a *special method*).
In our case here, the first argument will receive the just created object, but we have one additional argument, `name`. So we need to pass that in when we create the instance:
```
p = Person('name')
```
The `__init__` method was actually called - let's see this:
```
class Person:
    def __init__(self, name):
        # Print the instance to show __init__ receives the already-created object.
        print(f'__init__ called for object {self}')
        self.name = name
p = Person('Alex')
```
And in fact, the memory address of `p` is:
```
hex(id(p))
```
which as you can see, is exactly the same object `self` was set to when `__init__` was called.
And our instance `p` now has an attribute called `name`:
```
p.name
```
We can create another instance:
```
p2 = Person('Eric')
hex(id(p2)), p2.name
```
And that has not affected the `name` of `p` - since `name` is an instance attribute (it is specific to the instance):
```
p.name
```
| github_jupyter |
# Массивы
```
import numpy
import numpy as np
a = numpy.zeros((2, 3), dtype=numpy.float32) # an assignment displays nothing
a # displays a; the Y axis is the left (outer) coordinate, the X axis the right (inner) one
a; # a trailing semicolon suppresses the display of a
type(a)
a.__class__
a = numpy.zeros((2, 3, 4), dtype=numpy.float32);
a # displays the new a; the X axis is the rightmost coordinate, the Y axis shows the outer coordinates
a += 1;
display(a);
a *=2;
display(a); # scalar addition and scalar multiplication; display() is Jupyter's (Colab's) rich output
ones = lambda *shape: numpy.ones(shape, dtype=numpy.float32)
ones(2,2,2)
```
# Задача
```
"""
Задача:
вывести красиво операцию A + B = C на экран
"""
my_input = [ones(2, 3), '+', ones(2, 3), '=', ones(2,3)*2]
print(my_input)
print(*my_input)
display(*my_input)
# пусть наша функция имеет такой формат:
def pp(*lst):
pass
pp(*my_input)
lines = [repr(a).split('\n') for a in my_input]; lines
lines = [str(a).split('\n') for a in my_input]; lines
max_lines = max([len(line) for line in lines])
max_lines
lines = [line+['']*(max_lines-len(line)) for line in lines]; lines
```
### транспонирование, reshape, операция получения максимума
```
# Array of per-item line lists, and its transpose.
lines_arr = numpy.array(lines); lines_arr.T
# Careful: len() of a *list of lines* is the line count, not a string width.
widths = [len(x) for x in lines]; display(widths)
lines_arr.shape, lines_arr.flatten().shape
# Per-string widths over the flattened array, then reshaped back.
widths = numpy.array([len(x) for x in lines_arr.flatten()]); display(widths)
widths = widths.reshape(len(lines_arr), -1); display(widths)
widths = widths.reshape(*lines_arr.shape); display(widths)
numpy.max(widths)
widths
widths.max()
widths.max(axis=1) # takes the maximum along the inner (last) axis
line_widths = widths.max(axis=1)
line_widths
```
### форматирование всё равно придётся делать поэлементно. зашли в тупик...
```
# Re-split the items into lines and recompute the tallest item's height.
lines = [str(a).split('\n') for a in my_input]; lines
max_lines = max([len(line) for line in lines])
max_lines
def fmt_item(lines, max_lines=0):
    """Left-justify every line to the width of the block's widest line and
    pad the list with blank lines until it holds at least *max_lines* rows."""
    width = max(len(row) for row in lines)
    padded = [row.ljust(width) for row in lines]
    blank = ' ' * width
    while len(padded) < max_lines:
        padded.append(blank)
    return padded
results = fmt_item(str(numpy.arange(1, 7).reshape(2,-1)).split('\n'), max_lines=3)
print('\n'.join(results))
# а что делать дальше, когда у нас есть такие массивы?
# воспользуемся array().T
def pp(*items):
    """Pretty-print *items* side by side.

    Each item is rendered with str(), split into lines, padded into a
    rectangle via fmt_item, and the rectangles are joined row by row so
    multi-line arrays print next to each other on the same rows.
    """
    blocks = [str(obj).split('\n') for obj in items]
    height = max(len(block) for block in blocks)
    blocks = [fmt_item(block, max_lines=height) for block in blocks]
    # Transpose: rows of the output are the i-th line of every block.
    rows = numpy.array(blocks).T
    print('\n'.join(' '.join(row) for row in rows))
pp(*my_input)
```
### Попробуем нашу функцию:
```
A = numpy.arange(1, 11).reshape(5,2)
B = numpy.linspace(10, 30, 10).reshape(5,2)
R = A * B
pp(A, '*', B, '=', R)
import operator
#operator.add = lambda x, y: x + y
def op(x, name, y, func):
    """Apply binary *func* to x and y and pretty-print 'x name y = result'."""
    result = func(x, y)
    pp(x, name, y, '=', result)
op(numpy.array([[1,2],[3,4]]), '+', numpy.array([[5,6],[7,8]]), operator.add)
# вспомним классы, оформим в виде класса
class Op:
    """A named binary operation.

    Calling an instance computes ``func(x, y)`` and pretty-prints the whole
    equation ``x <name> y = result`` via :func:`pp`.
    """

    def __init__(self, name, func):
        # Symbol shown between the operands, and the callable applied to them.
        self.name = name
        self.func = func

    def __call__(self, x, y):
        outcome = self.func(x, y)
        pp(x, self.name, y, '=', outcome)
SSum = Op('+', lambda x, y: x + y)
SMul = Op('*', lambda x, y: x * y)
MMul = Op('@', lambda x, y: x @ y) # python 3.5+
SMul(A, B)
MMul(A.T, B)
tr = numpy.array([[9,8],[7,6],[5,4]])
te = numpy.array([[1,2],[3,4]])
num_train = tr.shape[0]
num_test = te.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
pp(dists)
for i_test in range(num_test):
for i_train in range(num_train):
dists[i_test, i_train] = np.abs(np.add.reduce(te[i_test] - tr[i_train]))
pp(dists[i_test, i_train])
pp(dists)
num_train = tr.shape[0]
num_test = te.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
pp(dists)
for i_test in range(num_test):
dists[i_test] = np.sum(np.abs(te[i_test]- tr), axis=1)
pp(dists)
new_te = te[:, np.newaxis]
dists = np.sum(np.abs(new_te - tr), axis=2)
dists
for i in range(dists.shape[0]):
print(np.sort(dists[i])[:3])
a = np.array([9, 4, 4, 3, 3, 9, 0, 4, 6, 0])
b = np.array([False, False, False, False, False, False, False, False, False, False])
ind = np.argpartition(a, 1)#[:3]
ind #[6, 9, 4]
#b[ind] #array([ True, False, True])
#c = np.bincount(b[ind]) # array([1, 2]
#np.argmax(c)
tr
a = np.array_split(tr, 3)
for i in range(len(a)):
b = np.delete(a.copy(), i, 0)
print(b, ',')
a =np.concatenate(a)
a
# 0, 1, 2, 3, 4, 5, 6, 7
arr = np.array([1100, 1, 9, 2, 0, 17, 17, 5])
c = np.bincount(arr)
np.argmax(c)
tr
# BUG(review): `np` is the NumPy *module*, so `tr - np` raises a TypeError.
# The line looks unfinished — presumably something like `tr - np.mean(tr, axis=0)`
# was intended. Left unchanged because the intended operand cannot be inferred
# from this notebook; confirm with the author before fixing.
tr_n = tr - np
```
# Простая индексация
```
# NOTE: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` (alias of np.bool_) is the equivalent dtype.
pred = np.zeros(5, dtype=bool)
pred
# Simple indexing examples: a row slice, a single scalar element, and a
# 2-D sub-block (slice of rows keeps the extra dimension).
pp(B[0, :], B[-1,-2], B[-2:-1, 0])
```
# Broadcasting и stack
```
SSum(A, B[0]) # что произойдёт? это преобразование называется broadcasting
SMul(A, B[0]) # смотрим ещё раз, на умножении
SMul(A, numpy.stack(list(B[0:1])*5))
```
# Бинарные и логические операции с массивами
```
pp(A, A>3, B, B>=10.) # сравнение float -- дело неточное
1<<23
```
Float 32 format: 
```
pp(numpy.isclose(B, 10), (10 - 1e-7 <= B) & (B <= 10 + 1e-7))
idx = numpy.isclose(B, 10) | numpy.isclose(B, 30)
B[idx]
```
# Индексация массивов
```
pp(B)
pp(numpy.isclose(B[1:-1, 1:-1], 10)) # классическая индексация питона
pp(numpy.isclose(B[0], 10)) # берём первую строку
pp(numpy.isclose(B[:, 1], 10)) # берём второй столбец
```
# Cлучайные значения
```
pp(numpy.random.uniform(0, 1), numpy.random.randn(2, 3), numpy.random.rand(2, 3))
import random
random.seed(10)
numpy.random.seed(10)
nr = numpy.random.uniform(0, 1)
nr2 = numpy.random.uniform(0, 1)
rr = random.uniform(0, 1)
rr2 = random.uniform(0, 1)
pp(rr, nr, rr2, nr2)
random.seed(10)
numpy.random.seed(10)
rr = random.uniform(0, 1)
nr = numpy.random.uniform(0, 1)
rr2 = random.uniform(0, 1)
nr2 = numpy.random.uniform(0, 1)
pp(rr, nr, rr2, nr2)
import matplotlib
# значение по умолчанию в colab, но не в локальных ноутбуках
matplotlib.rcParams['axes.grid'] = True
import matplotlib.pyplot as plt
# нарисуем график для 11 случайных нормально распределённых чисел.
x = numpy.linspace(0, 10, 11)
dots = numpy.random.randn(11)
plt.plot(x, dots);
import matplotlib
matplotlib.rcParams['axes.grid'] = False # убираем белую клетку (настройки colab)
import matplotlib.pyplot as plt
plt.plot(x, dots); # рисуем линию ещё раз
# если надо просто вывести точки, без линий между ними
plt.scatter(x, dots);
# выводим двумерный массив, используя цветовую схему 'hot'.
# matplotlib автоматически определяет минимум и максимум
# по нему настраивает цвета
plt.imshow(numpy.arange(0, 120).reshape(6, 20), 'hot');
```
# Строки и словари
```
# Задача: определить, какой символ встречается чаще всего в данном тексте,
# не считая пробелов!
a = "Кот сидел на крыше тише мыши"
pp(set(list(a)))
from collections import Counter
# приближённое решение.
Counter(list(a)).most_common()
# убрать пробел и будет то, что надо. попробуйте сами.
# напишем вручную код аналогичный тому, что происходит внутри Counter
counter = {}
for ch in list(a.lower()):
if ch == ' ': continue
if ch in counter:
counter[ch] += 1
else:
counter[ch] = 1
pp(counter)
pp(type(ch)) # в питоне даже отдельные символы -- это строчки
max(counter.values()) # нашли максимум
# алгоритм для поиска максимума
winner_ch = None
winner_count = 0
for ch, count in counter.items():
if count > winner_count:
winner_count = count
winner_ch = ch
print(winner_ch, winner_count) # и вот наш символ-победитель
# имейте в виду, победителя на самом деле три.
# исправьте код, чтобы вывести и второй и третий.
```
---
---
---
```
A
a=A[0]
b=B[0]
c=B[0].T
print(a.shape, b.shape, c.shape)
pp(a, ' ', b, ' ', a+b)
a=A[0:1]
b=B[0:1]
c=B[0:1].T
pp('shapes:', a.shape, b.shape, c.shape)
pp('arrays:', a, ' ', c, ' ', a+c)
a = A[0]
b = A[0].reshape(-1, 1)
c = A[0][:,None] # or [:, numpy.newaxis]
d = A[0][None, :] # or [numpy.newaxis, :]
pp(a.shape, ' ', b.shape, ' ', c.shape, ' ', d.shape)
print()
pp(a, ' ', b, ' ', c, ' ', d)
```
| github_jupyter |
# DoWhy: Different estimation methods for causal inference
This is a quick introduction to the DoWhy causal inference library.
We will load in a sample dataset and use different methods for estimating the causal effect of a (pre-specified) treatment variable on a (pre-specified) outcome variable.
First, let us add required path for python to find DoWhy code and load required packages.
```
import os, sys
sys.path.append(os.path.abspath("../../"))
import numpy as np
import pandas as pd
import logging
import dowhy
from dowhy.do_why import CausalModel
import dowhy.datasets
```
Let us first load a dataset. For simplicity, we simulate a dataset with linear relationships between common causes and treatment, and common causes and outcome.
Beta is the true causal effect.
```
data = dowhy.datasets.linear_dataset(beta=10,
num_common_causes=5,
num_instruments = 2,
num_samples=10000,
treatment_is_binary=True)
df = data["df"]
```
Note that we are using a pandas dataframe to load the data.
## Identifying the causal estimand
We now input a causal graph in the DOT graph format.
```
# With graph
model=CausalModel(
data = df,
treatment=data["treatment_name"],
outcome=data["outcome_name"],
graph=data["dot_graph"],
instruments=data["instrument_names"],
logging_level = logging.INFO
)
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
```
We get a causal graph. Now identification and estimation are done.
```
identified_estimand = model.identify_effect()
print(identified_estimand)
```
## Method 1: Regression
Use linear regression.
```
causal_estimate_reg = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression",
test_significance=True)
print(causal_estimate_reg)
print("Causal Estimate is " + str(causal_estimate_reg.value))
```
## Method 2: Stratification
We will be using propensity scores to stratify units in the data.
```
causal_estimate_strat = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_stratification")
print(causal_estimate_strat)
print("Causal Estimate is " + str(causal_estimate_strat.value))
```
## Method 3: Matching
We will be using propensity scores to match units in the data.
```
causal_estimate_match = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_matching")
print(causal_estimate_match)
print("Causal Estimate is " + str(causal_estimate_match.value))
```
## Method 4: Weighting
We will be using (inverse) propensity scores to assign weights to units in the data.
```
causal_estimate_ipw = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_weighting")
print(causal_estimate_ipw)
print("Causal Estimate is " + str(causal_estimate_ipw.value))
```
## Method 5: Instrumental Variable
We will be using Wald estimator for the provided instrumental variable.
```
causal_estimate_iv = model.estimate_effect(identified_estimand,
method_name="iv.instrumental_variable", method_params={'iv_instrument_name':'Z1'})
print(causal_estimate_iv)
print("Causal Estimate is " + str(causal_estimate_iv.value))
```
## Method 6: Regression Discontinuity
We will be internally converting this to an equivalent instrumental variables problem.
```
causal_estimate_regdist = model.estimate_effect(identified_estimand,
method_name="iv.regression_discontinuity",
method_params={'rd_variable_name':'Z1',
'rd_threshold_value':0.5,
'rd_bandwidth': 0.1})
print(causal_estimate_regdist)
print("Causal Estimate is " + str(causal_estimate_regdist.value))
```
| github_jupyter |
# **01_PREPROCESSING**
Summary:
1. Import and Normalization
2. Split Opinions into Subjects of Interest
3. Text Cleaning
4. Split into Sentences
---
```
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append('/content/drive/My Drive/Università/inforet_prj/')
!pip install -U spacy unidecode
!python -m spacy download en_core_web_sm
import lzma, json
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm
import spacy
import string
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re
from unidecode import unidecode
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
nltk.download('punkt')
sns.set()
tqdm.pandas()
nlp = spacy.load("en_core_web_sm")
```
## **1. Import and Normalization**
### *1.1 Data Import*
**NB**: run the 3 cells below only if on Google Colab. Otherwise skip them and download the compressed data manually from https://api.case.law/v1/bulk/22341/download/
```
!pip install selenium
!apt-get update # to update ubuntu to correctly run apt install
!apt install chromium-chromedriver
!cp /usr/lib/chromium-browser/chromedriver /usr/bin
import sys
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
wd = webdriver.Chrome('chromedriver', options=chrome_options)
wd.get("https://case.law/bulk/download/")
wd.find_element_by_xpath("/html/body/div/main/div/div/div[2]/div/div[2]/div/div[2]/a").click()
!unzip Illinois-20200302-text.zip
!mv Illinois-20200302-text/data/data.jsonl.xz data.jsonl.xz
!rm -r Illinois-20200302-text
!rm Illinois-20200302-text.zip
```
### *1.2 Data Normalization*
Creation of opinions, citations and df
```
# We know that there will be 183146 items,
# so we set this manually since tqdm will not
# be able to display a progress bar when reading from
# a file.
pbar = tqdm(total=183146)
# Read directly from the compressed file.
# We will create a list where each element is a line
# of the file, which in turns is a json
# (casted in python as a dict).
with lzma.open("data.jsonl.xz") as f:
cases = []
for line in f:
cases.append(json.loads(str(line, 'utf8')))
pbar.update(1)
pbar.close()
# https://pandas.pydata.org/docs/reference/api/pandas.json_normalize.html
df = pd.json_normalize(cases)
del cases
# Flattens the list of attorneys to a single string
# with ; as separator
df["casebody.data.attorneys"] = df.apply(lambda x: "; ".join(x["casebody.data.attorneys"]), axis=1)
"""
Each element of the columns 'citations' and 'casebody.data.opinions' is
a list, and in turn each element of the list is a json object.
This means that we need to unravel those column to have a more "flatten"
version (like a simple table, eg. a DataFrame).
The approach shown here consists of creating two different DataFrames
that will contain data from the two columns. In order to preserve the
association of each row of the new DataFrame with the corresponding data
in the original DataFrame, we will add to each json a new key called "id"
that will have the original row number as value.
"""
def add_id_todict(x, col):
    """Tag every dict in the list stored at row *x*'s column *col* with the
    row's index (``x.name``) under the key ``"id"`` and return that list.

    The dicts are mutated in place, so the DataFrame cell is updated too.
    """
    records = x[col]
    for record in records:
        record["id"] = x.name
    return records
df["casebody.data.opinions"] = df.apply(lambda x: add_id_todict(x, "casebody.data.opinions"), axis=1)
df["citations"] = df.apply(lambda x: add_id_todict(x, "citations"), axis=1)
# For clarity, let's also add the "id" column to the original df
df["id"] = df.index.values
# We merge each element in the "citations" column (which is a list)
# to a single list called "citations".
#
# Using list comprehension instead of df["column"].sum()
# because the latter is slow for large df.
citations = [item for x in df["citations"] for item in x]
df.drop(columns=["citations"], inplace=True)
# Same for the opinions column
opinions = [item for x in df["casebody.data.opinions"] for item in x]
df.drop(columns=["casebody.data.opinions"], inplace=True)
# Let's now get the flattened table from the citations
# and from the opinions
citations_df = pd.json_normalize(citations)
opinions_df = pd.json_normalize(opinions)
```
We now have 3 dataframes that can be joined using the "id" column.
```
df['year'] = pd.to_datetime(df['decision_date']).apply(lambda x: x.year)
opinions_df = pd.merge(opinions_df, df[['year','id']], on="id", how="left")
```
### *1.3 Serialize data*
```
with open("/content/drive/MyDrive/Università/inforet_prj/df.pkl", "wb") as f:
pickle.dump(df, f)
with open("/content/drive/MyDrive/Università/inforet_prj/citations.pkl", "wb") as f:
pickle.dump(citations_df, f)
with open("/content/drive/MyDrive/Università/inforet_prj/opinions.pkl", "wb") as f:
pickle.dump(opinions_df, f)
del df
del citations_df
del opinions
del citations
del opinions_df
import gc
gc.collect()
```
---
## **2. Split Opinions into Subjects of Interest**
We divide the rows into 3 groups based on the lists of terms provided for each subject of interest: narcotics, weapons and investigation.
```
with open("/content/drive/MyDrive/Università/inforet_prj/opinions.pkl", "rb") as f:
opinions_df = pickle.load(f)
opinions_df["text"] = opinions_df["text"].str.replace("|", " ")
opinions_df.author = opinions_df.author.fillna("")
array = opinions_df["author"].progress_apply(lambda x: nltk.word_tokenize(x.lower()))
authors_judges = []
for op in array:
for token in op:
if token.isalpha() and len(token) > 1:
authors_judges.append(token)
authors_judges = set(authors_judges)
with open("authors_judges.pkl", "wb") as f:
pickle.dump(authors_judges, f)
!cp authors_judges.pkl /content/drive/MyDrive/Università/inforet_prj
def typo(text):
    """Normalise recurring typos and drug-name spellings in an opinion text.

    Replacements are applied sequentially, in order, so earlier fixes may
    feed later ones (order is significant for overlapping substrings).
    """
    fixes = (
        ('cannabi ', 'cannabis '),
        ('lysergic acid diethylamide', 'lsd'),
        ('methylenedioxymethamphetamine', 'mdma'),
        ('MDMA', 'mdma'),
        ('methylenedioxyamphetamine', 'mda'),
        ('ciacetyl', 'diacetyl'),
        (' nar cotic', ' narcotic'),
        (' fi ', ''),
    )
    cleaned = text
    for old, new in fixes:
        cleaned = cleaned.replace(old, new)
    return cleaned
opinions_df['text'] = opinions_df.text.progress_apply(lambda x: typo(x))
narcotics = ["cannabis", "marijuana", "lsd", "heroin", 'methaqualone', "ecstasy", "mdma", "cocaine", "cocaine", "methamphetamine", "hydromorphone", "dilaudid", "meperidine", "demerol", "oxycodone", "dexedrine", "fentanyl", "ritalin", "methadone", "amphetamine", "phencyclidine", "ephedrine"]
weapons = ["gun", "knife", "weapon", "firearm", "rifle", "carabine", "shotgun", "assaults rifle", "sword", "blunt objects"]
investigations = ["gang", "mafia", "serial killer", "rape", "thefts", "recidivism", "arrest", "ethnicity", "caucasian", "afroamerican", "native american", "hispanic", "gender", "male", "female", "man", "woman", "girl", "boy", "robbery", "cybercrime"]
narco_df = opinions_df.loc[opinions_df['text'].str.contains("|".join(narcotics)).any(level=0)] # 35410 rows / 6076 / 11038
narco_df.to_csv("narco_df.csv", index=False, sep="|")
!cp narco_df.csv /content/drive/MyDrive/Università/inforet_prj
del opinions_df
del authors_judges
import gc
gc.collect()
```
---
## **3. Text Cleaning**
Load Opinions from the previous step.
```
with open("/content/drive/MyDrive/Università/inforet_prj/authors_judges.pkl", "rb") as f:
authors_judges = pickle.load(f)
# Proper nouns found in the dataset
names = ["Brinks", "Flores", "People v.","Pinnix", "Garvey", "Steinbach", "Fowlar", "Mobil", "Milian", "TQ", "Yanez", "Tawanda", "Geder", "Mason", "Payne", "Bair", "ILCS", "tbe", "tbat", "Delores","Stivers", "Spades", "Snyders", "Nally", "Budaj", "Yacoo", "Cosgrove", "Cos-grove", "Gayles", "Hodges"]
def full_text_clean(text, is_sentence=False):
    """Clean one legal-opinion text (or single sentence) down to a
    space-joined string of lowercase lemmas.

    Pipeline: fix known OCR/abbreviation artifacts, drop capitalised words,
    mask and then remove legal-citation patterns, then run spaCy and keep
    only alphabetic, non-stopword, non-name tokens.

    NOTE(review): relies on module-level globals `nlp` (spaCy model),
    `authors_judges` (set of judge-name tokens), `names` (proper-noun list),
    plus `pd`, `re`, `unidecode`, `nltk`, `string` being imported.

    :param text: raw opinion text; anything empty/NaN/non-str returns ''.
    :param is_sentence: when False, the part before the first ':' is dropped
        (presumably a header/caption — confirm with the data format).
    :return: cleaned string of lemmas, or '' if nothing survives cleaning.
    """
    # Reject empty / missing / non-string input up front.
    if text == '' or pd.isna(text) or not isinstance(text, str):
        return ''
    # Known OCR typos and legal abbreviations that would confuse tokenization.
    bb = (
        text.replace(' U.S. ','US')
        .replace(' S.Ct. ','SCt')
        .replace(' f. supp. ', ' fsupp ')
        .replace(' cir.', ' cir ')
        .replace("[o]", "o")
        .replace(" CIR ", " confidential source ")
        .replace("Reg.", " regulation ")
        .replace("miIe", " mile ")
        .replace(" com mitted ", " committed ")
        .replace("wtap", "tap")
    )
    if bb.strip() == '' or pd.isna(bb):
        return ''
    # A text with no spaces is a single token — nothing useful to clean.
    if not ' ' in bb:
        return ''
    # Drop every word starting with an uppercase letter (proper nouns,
    # case names, headings).
    temp = bb.split()
    bb = " ".join([ele for ele in temp if not ele[0].isupper()])
    if not is_sentence:
        # For full opinions, discard everything before the first ':'.
        bb = bb.split(":")
        if len(bb) > 1:
            bb.pop(0)
        bb = ' '.join(bb)
    if bb.strip() == '' or pd.isna(bb):
        return ''
    bb = unidecode(re.sub(' +', ' ', bb.strip())) # collapse whitespace, transliterate non-ASCII
    bb = bb.strip()
    # Mask legal-citation patterns with placeholder tokens (removed below).
    bb = re.sub('[0-9]{1,2} [Uu]\.[Ss]\.[Cc]\. §\s?\d+(\w+)?( \([0-9]{4}\))?',' USCCITATION ', bb)  # e.g. 18 U.S.C. § 922(2000)
    bb = re.sub('[a-zA-Z]+ [vV]\. [a-zA-Z]+',' CaseAvCaseB ', bb) # CaseA v. CaseB
    bb = re.sub('\d+ (Ark|Ill)\. \d+',' StateCase ', bb) # e.g. 300 Ark. 230
    bb = re.sub(' [Ss][Tt][Aa][Tt][Ss]\.',' StateCase2 ',bb) # "Stats." references
    bb = re.sub('\d+ [A-z]+\.[ ]*[A-z]+\.[ ]*\d[A-z]+ \d+',' CaseRef ',bb) # e.g. 953 S.W.2d 559 or 87 L.Ed.2d 481
    bb = re.sub('[Jj][Rr]\.', 'Jr ', bb)
    bb = re.sub('\d+ (Ark|Ill)\. App. \d+',' StateAppCase ', bb)
    bb = re.sub('(Ark|Ill)\. Code Ann\. § ',' StateCodeSection ', bb)
    bb = re.sub(' [Ii][Dd]\.',' Idem ', bb)
    bb = re.sub('§+',' Section ', bb)
    bb = re.sub('[Aa][Nn][Nn][Oo][:.]* \d+ [Aa]\.*[ ]*[Ll]\.*[ ]*[Rr]\.*[ ]*\d+','anno', bb) # A.L.R. annotations
    bb = re.sub(' [Aa][Nn][Nn][Oo][:.]*',' anno', bb)
    bb = re.sub('[Cc][Ff]\.','cf', bb)
    bb = re.sub(' [Rr][Ee][Vv]\. [Ss][Tt][Aa][Tt]\.',' revstat ', bb)
    bb = re.sub('[ \d]+[Pp][Aa][Rr]\.',' par ', bb)
    bb = re.sub('[ \d]+[Ss][Tt][Aa][Tt]\.',' stat ', bb)
    # Remove any remaining parenthesised / bracketed spans entirely.
    bb = re.sub("[\(\[].*?[\)\]]", "", bb)
    # Strip the placeholder tokens introduced above.
    bb = (
        bb.replace("USCCITATION", "")
        .replace("CaseAvCaseB", "")
        .replace("StateCase", "")
        .replace("StateCase2", "")
        .replace("CaseRef", "")
        .replace("StateAppCase", "")
        .replace("StateCodeSection", "")
        .replace("anno", "")
    )
    bb = unidecode(re.sub(' +', ' ', bb.strip()))
    bb = bb.strip()
    if bb.strip() == '' or pd.isna(bb):
        return ''
    doc = nlp(bb)
    # Person names detected by spaCy NER, tokenized and de-punctuated,
    # plus the manually curated `names` list.
    persons = set([str(ent.text).lower() for ent in doc.ents if ent.label_ == "PERSON"])
    persons = [x.translate(str.maketrans('', '', string.punctuation)) for x in set(nltk.word_tokenize(" ".join(persons)))]
    persons.extend(names)
    result = []
    for token in doc:
        if (len(token.text) > 1
            and token.text.isalpha() # token is a word of letters only
            and token.pos_ not in ['NUM', 'PROPN'] # drop numerals and proper nouns
            and not token.is_punct # drop punctuation
            and not token.is_stop # drop stopwords
            and token.text not in authors_judges # drop judge names
            and token.text not in persons # drop detected person names
            ):
            result.append(token.lemma_.lower())
    # Result is a single string of space-separated lowercase lemmas,
    # e.g. "lemma lemma lemma ...".
    result = " ".join(result)
    return result
# 1 H
with open("narco_nlp_21set_nostop.csv", "w") as my_empty_csv:
pass
pbar = tqdm(total=6076 ) # narco_df total rows
chunksize = 1
for chunk in pd.read_csv("narco_df.csv", chunksize=chunksize, sep="|", usecols=["text"]):
chunk['spacy_nlp'] = chunk.apply(lambda row: full_text_clean(row["text"]), axis=1)
chunk.drop(columns=["text"], inplace=True)
chunk.to_csv("narco_nlp_21set_nostop.csv", index=False, sep="|", mode="a", header=False)
pbar.update(1)
pbar.close()
!cp narco_nlp_21set_nostop.csv /content/drive/MyDrive/Università/inforet_prj
```
Check that the cleaning worked properly.
```
narco_nlp = pd.read_csv(
"/content/drive/MyDrive/Università/inforet_prj/narco_nlp_21set_nostop.csv",
sep="|",
names=['spacy_nlp'],
header=None
)
assert narco_nlp.shape[0] > 0
assert narco_nlp.loc[pd.isna(narco_nlp.spacy_nlp)].shape[0] == 0
```
## **4. Split into Sentences**
```
opinions_df = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_df.csv", sep="|")
narco_pmi_ = opinions_df.loc[:, [ "text"]]
narco_pmi_["sentences"] = narco_pmi_.text.progress_apply(lambda x: sent_tokenize(x))
narco_sentences = narco_pmi_.explode('sentences')
narco_sentences = narco_sentences.drop(columns=["text"]).reset_index().rename(columns={"index": "opinion_id"})
narco_sentences.to_csv("narco_sentences.csv", index=False, sep="|")
```
---
```
opinions_df = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_df.csv", sep="|")
schedule_1 = ["cannabis", "marijuana", "lsd", "heroin", 'methaqualone', "ecstasy", "peyote", "mescaline", "mda", "mdma"] #https://www.dea.gov/drug-information/drug-scheduling
schedule_2 = ["cocaine", "methamphetamine", "hydromorphone", "dilaudid", "meperidine", "demerol", "oxycodone", "dexedrine", "fentanyl", "ritalin", "methadone", "amphetamine", "phencyclidine", "pseudoephedrine", "ephedrine", "meth", "opium", "dilaudid", "preludin"]
schedule_3 = ["ketamine", "anabolic" , "steroids", "testosterone", "ketamine"]
schedule_4 = ["modafinil", "provigil", "adderall", "methylphenidate", "memantine", "axura", "soma", "xanax", "darvon", "darvocet", "valium", "ativan", "talwin", "ambien", "tramadol", "ethclorvynol"]
schedule_5 = ["phenylpropanolamine", "lomotil", "motofen", "lyrica", "parepectolin", "tetracaine"]
conditions = [
(opinions_df['text'].str.contains("|".join(schedule_1))) & ~(opinions_df['text'].str.contains("|".join(schedule_3))) & ~(opinions_df['text'].str.contains("|".join(schedule_2))) & ~(opinions_df['text'].str.contains("|".join(schedule_4))) & ~(opinions_df['text'].str.contains("|".join(schedule_5))),
(opinions_df['text'].str.contains("|".join(schedule_2))) & ~(opinions_df['text'].str.contains("|".join(schedule_3))) & ~(opinions_df['text'].str.contains("|".join(schedule_1))) & ~(opinions_df['text'].str.contains("|".join(schedule_4))) & ~(opinions_df['text'].str.contains("|".join(schedule_5))),
(opinions_df['text'].str.contains("|".join(schedule_3))) & ~(opinions_df['text'].str.contains("|".join(schedule_1))) & ~(opinions_df['text'].str.contains("|".join(schedule_2))) & ~(opinions_df['text'].str.contains("|".join(schedule_4))) & ~(opinions_df['text'].str.contains("|".join(schedule_5))),
(opinions_df['text'].str.contains("|".join(schedule_4))) & ~(opinions_df['text'].str.contains("|".join(schedule_3))) & ~(opinions_df['text'].str.contains("|".join(schedule_2))) & ~(opinions_df['text'].str.contains("|".join(schedule_1))) & ~(opinions_df['text'].str.contains("|".join(schedule_5)))
]
# create a list of the values we want to assign for each condition
values = ['narco_1', 'narco_2', 'narco_3', 'narco_4']
# create a new column and use np.select to assign values to it using our lists as arguments
opinions_df['schedule'] = np.select(conditions, values)
# display updated DataFrame
opinions_df.head()
narco_pmi = opinions_df.loc[:, ["schedule", "text"]]
narco_1_pmi = narco_pmi.loc[narco_pmi['schedule'] == 'narco_1'] # 1969
narco_2_pmi = narco_pmi.loc[narco_pmi['schedule'] == 'narco_2'] # 1782
narco_1_pmi["sentences"] = narco_1_pmi.text.progress_apply(lambda x: sent_tokenize(x))
narco_2_pmi["sentences"] = narco_2_pmi.text.progress_apply(lambda x: sent_tokenize(x))
narco_sentences_1 = narco_1_pmi.explode('sentences')
narco_sentences_2 = narco_2_pmi.explode('sentences')
narco_sentences_1 = narco_sentences_1.drop(columns=["text"]).reset_index().rename(columns={"index": "opinion_id"})
narco_sentences_2 = narco_sentences_2.drop(columns=["text"]).reset_index().rename(columns={"index": "opinion_id"})
narco_sentences_1.to_csv("narco_sentences_1.csv", index=False, sep="|")
narco_sentences_2.to_csv("narco_sentences_2.csv", index=False, sep="|")
!cp narco_sentences_1.csv /content/drive/MyDrive/Università/inforet_prj # 239155
!cp narco_sentences_2.csv /content/drive/MyDrive/Università/inforet_prj # 325030
```
---
```
narco_sentences = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences.csv", sep="|")
narco_sentences = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences.csv", sep="|")
narco_sentences_1 = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences_1.csv", sep="|")
narco_sentences_2 = pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences_2.csv", sep="|")
names = ["Brinks", "Flores", "People v.","Pinnix", "Garvey", "Steinbach", "Fowlar", "Mobil", "Milian", "TQ", "Yanez", "Tawanda", "Geder", "Mason", "Payne", "Bair", "ILCS", "tbe", "tbat", "Delores","Stivers", "Spades", "Snyders", "Nally", "Budaj", "Yacoo", "Cosgrove", "Cos-grove", "Gayles", "Hodges"]
# 5 H
with open("narco_pmi_nlp.csv", "w") as my_empty_csv:
pass
pbar = tqdm(total=1056006) # narco_sentences total rows 1056006
chunksize = 1
for chunk in pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences.csv", chunksize=chunksize, sep="|", usecols=["sentences"]):
chunk['sent_clean'] = chunk.apply(lambda row: full_text_clean(row["sentences"], is_sentence=True), axis=1)
chunk.drop(columns=["sentences"], inplace=True)
chunk.to_csv("narco_pmi_nlp.csv", index=False, sep="|", mode="a", header=False)
pbar.update(1)
pbar.close()
!cp narco_pmi_nlp.csv /content/drive/MyDrive/Università/inforet_prj
# 1 H
with open("narco_1_pmi_nlp.csv", "w") as my_empty_csv:
pass
pbar = tqdm(total=239155) # narco_sentences total rows 239155
chunksize = 1
for chunk in pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences_1.csv", chunksize=chunksize, sep="|", usecols=["sentences"]):
chunk['sent_clean'] = chunk.apply(lambda row: full_text_clean(row["sentences"], is_sentence=True), axis=1)
chunk.drop(columns=["sentences"], inplace=True)
chunk.to_csv("narco_1_pmi_nlp.csv", index=False, sep="|", mode="a", header=False)
pbar.update(1)
pbar.close()
!cp narco_1_pmi_nlp.csv /content/drive/MyDrive/Università/inforet_prj
# 1 H
with open("narco_2_pmi_nlp.csv", "w") as my_empty_csv:
pass
pbar = tqdm(total=325030) # narco_sentences total rows 325030
chunksize = 1
for chunk in pd.read_csv("/content/drive/MyDrive/Università/inforet_prj/narco_sentences_2.csv", chunksize=chunksize, sep="|", usecols=["sentences"]):
chunk['sent_clean'] = chunk.apply(lambda row: full_text_clean(row["sentences"], is_sentence=True), axis=1)
chunk.drop(columns=["sentences"], inplace=True)
chunk.to_csv("narco_2_pmi_nlp.csv", index=False, sep="|", mode="a", header=False)
pbar.update(1)
pbar.close()
!cp narco_2_pmi_nlp.csv /content/drive/MyDrive/Università/inforet_prj
```
| github_jupyter |
# Plagiarism Detection Model
Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model.
This task will be broken down into a few discrete steps:
* Upload your data to S3.
* Define a binary classification model and a training script.
* Train your model and deploy it.
* Evaluate your deployed classifier and answer some questions about your approach.
To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.
> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.
It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset.
---
## Load Data to S3
In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data.
>The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3.
Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved.
```
import pandas as pd
import boto3
import sagemaker
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# session and role
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# create an S3 bucket
bucket = sagemaker_session.default_bucket()
```
## EXERCISE: Upload your training data to S3
Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples.
You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file.
```
import os
# should be the name of directory you created to save your features data
data_dir = 'plagiarism_data'
# set prefix, a descriptive name for a directory
prefix = 'plagiarism'
# upload all data to S3
test_location = sagemaker_session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix, bucket=bucket)
train_location = sagemaker_session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix, bucket=bucket)
```
### Test cell
Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook).
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
empty_check.append(obj.key)
print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes:
* Use a built-in classification algorithm, like LinearLearner.
* Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html).
* Define a custom PyTorch neural network classifier.
It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model.
---
## EXERCISE: Complete a training script
To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model.
A typical training script:
* Loads training data from a specified directory
* Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.)
* Instantiates a model of your design, with any specified hyperparams
* Trains that model
* Finally, saves the model so that it can be hosted/deployed, later
### Defining and training a model
Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will:
1. Import any extra libraries you need
2. Define any additional model training hyperparameters using `parser.add_argument`
2. Define a model in the `if __name__ == '__main__':` section
3. Train the model in that same section
Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments.
**Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library.
### Provided code
If you read the code above, you can see that the starter code includes a few things:
* Model loading (`model_fn`) and saving code
* Getting SageMaker's default hyperparameters
* Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y`
If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links.
---
# Create an Estimator
When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments:
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`.
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `train_sklearn` OR `train_pytorch`.
* **entry_point**: The path to the Python script SageMaker runs for training.
* **source_dir**: The path to the training script directory `train_sklearn` OR `train_pytorch`.
* **role**: Role ARN, which was specified, above.
* **train_instance_count**: The number of training instances (should be left at 1).
* **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* **sagemaker_session**: The session used to train on Sagemaker.
* **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters.
Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`.
## EXERCISE: Define a Scikit-learn or PyTorch estimator
To import your desired estimator, use one of the following lines:
```
from sagemaker.sklearn.estimator import SKLearn
```
```
from sagemaker.pytorch import PyTorch
```
```
# your import and estimator code, here
from sagemaker.sklearn.estimator import SKLearn
estimator = SKLearn(entry_point='train.py',
source_dir='source_sklearn',
role=role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
framework_version='0.20.0',
sagemaker_session=sagemaker_session,
hyperparameters={
'max_leaf_nodes': 30,
'max_depth':2,
}
)
```
## EXERCISE: Train the estimator
Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console.
```
%%time
# Train your estimator on S3 training data
# s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='text/csv')
# linear_learner.fit({'train':s3_input_train})
estimator.fit({'train': train_location})
```
## EXERCISE: Deploy the trained model
After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point.
To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments:
* **initial_instance_count**: The number of deployed instances (1).
* **instance_type**: The type of SageMaker instance for deployment.
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used.
```
%%time
# uncomment, if needed
# from sagemaker.pytorch import PyTorchModel
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to our test data.
The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
```
## EXERCISE: Determine the accuracy of your model
Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation.
**To pass this project, your model should get at least 90% test accuracy.**
```
# First: generate predicted, class labels
import json
test_y_preds = predictor.predict(test_x.values)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test that your model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
# Using Sklearn
from sklearn.metrics import accuracy_score
accuracy_score(y_true=test_y.values, y_pred=test_y_preds)
# Second: calculate the test accuracy
import numpy as np
test_labels = test_y.values
test_preds = np.asarray(test_y_preds)
tp = np.logical_and(test_labels, test_preds).sum()
fp = np.logical_and(1-test_labels, test_preds).sum()
fn = np.logical_and(test_labels, 1-test_preds).sum()
tn = np.logical_and(1-test_labels, 1-test_preds).sum()
accuracy = (tp+tn)/(fp+fn+tn+tp)
print(accuracy)
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
print('True Positives: ', tp)
print('True Negatives: ', tn)
print('False Positives: ', fp)
print('False Negatives: ', fn)
print('Recall: ', tp / (tp + fn))
print('Precision: ', tp / (fp + tp))
```
### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is?
** Answer**:
The model produced 1 false positives and 0 false negatives
It produced this amount of false positives due to having a good accuracy score (96%)
This might be a reason to raise an alarm for overfitting, however with high scores in Recall (100%) and Precision (93%),
I do not think that is the case.
### Question 2: How did you decide on the type of model to use?
** Answer**:
Using an Decision Tree Classifier, I felt this would be a good decision as this algorithm can be used as a simple tree algorithm, and been able to set hyperparameters specific to this use case
----
## EXERCISE: Clean up Resources
After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below.
```
# uncomment and fill in the line below!
predictor.delete_endpoint()
```
### Deleting S3 bucket
When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again.
```
# deleting bucket, uncomment lines below
bucket_to_delete = boto3.resource('s3').Bucket(bucket)
bucket_to_delete.objects.all().delete()
```
### Deleting all your models and instances
When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally.
---
## Further Directions
There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below:
* Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0).
* Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism.
* Use language or character-level analysis to find different (and more) similarity features.
* Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not.
* Use API Gateway and a lambda function to deploy your model to a web application.
These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
| github_jupyter |
```
import argparse
from collections import namedtuple, OrderedDict
import itertools
import os
import numpy as np
from typing import Tuple
from typing import List
from typing import Dict
import random
from itertools import product
import copy
import re
import random
import hashlib
import pathlib
import json
import matplotlib as plt
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
os.environ['QT_QPA_PLATFORM']='offscreen'
plt.rcParams["font.family"] = "DejaVu Serif"
font = {'family' : 'DejaVu Serif',
'size' : 20}
plt.rc('font', **font)
import plotly.tools as tls
from utils import one_hot
from utils import generate_possible_object_names
from utils import numpy_array_to_image
from vocabulary import *
from object_vocabulary import *
from world import *
from grammer import *
from simulator import *
from relation_graph import *
import logging
import warnings
warnings.filterwarnings("ignore")
# Helpers.
def get_relation_statistics(command_structs):
"""
Return a dictionary, (relation, position) with counts
"""
stats = {}
for i in range(2): # at max 2!
stats[f"position-{i}"] = {}
for command in command_structs:
pos_id = 0
for k, v in command["rel_map"].items():
if v in stats[f"position-{pos_id}"].keys():
stats[f"position-{pos_id}"][v] += 1
else:
stats[f"position-{pos_id}"][v] = 1
pos_id += 1
return stats
def get_attribute_statistics(command_structs, include_keywords=["circle", "cylinder", "square", "box", "object"]):
stats = {}
# for k, v in command_structs[0]["obj_map"].items():
# stats[k] = {} # we can do it in object level!
for i in range(3): # at max 2!
stats[f"$OBJ_{i}"] = {}
for command in command_structs:
for k, v in command["obj_map"].items():
for keyword in include_keywords:
keyword_list = keyword.split(" ") # in case there are a couple!
match = True
for sub_k in keyword_list:
if sub_k not in v:
match = False
break
if match:
if keyword in stats[k].keys():
stats[k][keyword] += 1
else:
stats[k][keyword] = 1
return stats
def get_keyword_statistics(command_structs, include_keyword="adverb"):
stats = {}
for command in command_structs:
keyword = command[include_keyword]
if keyword in stats.keys():
stats[keyword] += 1
else:
stats[keyword] = 1
return stats
def flatten_dictionary(
dictionary_in
):
flat_dictionary = {}
for k, v in dictionary_in.items():
for kk, vv in v.items():
if kk not in flat_dictionary:
flat_dictionary[kk] = vv
else:
flat_dictionary[kk] += vv
return flat_dictionary
def plot_dictionary(
dictionary_in,
y_label="Frequency",
x_label="Conditions",
title="Missing Title",
save_file=None,
is_plot=False,
wandb=None,
):
group_str = [k for k, _ in dictionary_in[0].items()]
if len(group_str) > 8:
rotate=90
fontsize=10
else:
rotate=45
fontsize=13
all_stats = []
for d in dictionary_in:
group_stats = [d[k] for k in group_str]
all_stats.append(group_stats)
all_stats = np.array(all_stats)
std = np.std(all_stats, axis=0)
mean = np.mean(all_stats, axis=0)
# input data
mean_values = mean
variance = std**2
bar_labels = group_str
# plot bars
x_pos = list(range(len(bar_labels)))
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
g = ax.bar(x_pos, mean_values, yerr=variance, align='center', alpha=0.5)
plt.grid()
# set height of the y-axis
max_y = max(zip(mean_values, variance)) # returns a tuple, here: (3, 5)
plt.ylim([0, (max_y[0] + max_y[1]) * 1.1])
# set axes labels and title
plt.ylabel(y_label)
plt.xticks(x_pos, bar_labels)
plt.xticks(rotation = rotate, fontsize=fontsize)
plt.yticks(rotation = 45)
plt.title(title, fontsize=10)
if mean_values[0] > 10000:
plt.ticklabel_format(axis='y', style='sci', scilimits=(4,4))
if wandb != None:
# Let us also try to log this plot to wandb!
wandb.log({title: wandb.Image(fig)})
if save_file != None:
plt.savefig(save_file, dpi=100, bbox_inches='tight')
plt.close(fig)
else:
if is_plot:
plt.show()
def get_command_struct_statistics(
command_structs, run_name="ReaSCAN-Awesome", date="2021-05-06",
split="demo",
compositional_split=False,
n_sample=-1, n_runs=10,
output_dir="../../data-files/ReaSCAN-compositional_splits/",
save_to_disk=True,
wandb=None
):
statistics = OrderedDict({
"run_name": run_name,
"date": date,
"splits": split,
"number_of_these_examples_seen_in_training": -1 if not compositional_split else 0,
"number_of_command_structs": len(command_structs),
})
if n_sample == -1:
n_sample = len(command_structs)
# If we are downsampling, we need to do more runs as well!
random.shuffle(command_structs)
patterns = set([])
for command_s in command_structs:
patterns.add(command_s["grammer_pattern"])
statistics["command_patterns"] = list(patterns)
pattern_stats = get_keyword_statistics(command_structs, include_keyword="grammer_pattern")
statistics["pattern_stats"] = pattern_stats
# verb
verb_stats = get_keyword_statistics(command_structs, include_keyword="verb")
statistics["verb_stats"] = verb_stats
plot_dictionary(
[verb_stats],
title="Verbs",
save_file=os.path.join(output_dir, f"verb_stats-{split}.png"),
wandb=wandb,
)
# adverb
adverb_stats = get_keyword_statistics(command_structs, include_keyword="adverb")
# special handling for adverb for better readabilities
adverb_stats_rebuild = {}
for k, v in adverb_stats.items():
if k == "":
adverb_stats_rebuild["EMPTY"] = v
else:
adverb_stats_rebuild[k] = v
statistics["adverb_stats"] = adverb_stats_rebuild
plot_dictionary(
[adverb_stats_rebuild],
title="Adverbs",
save_file=os.path.join(output_dir, f"adverb_stats-{split}.png"),
wandb=wandb,
)
# relation
relation_stats = get_relation_statistics(command_structs)
if len(flatten_dictionary(relation_stats)) != 0:
statistics["relation_stats"] = relation_stats
plot_dictionary(
[flatten_dictionary(relation_stats)],
title="Relation-Types",
save_file=os.path.join(output_dir, f"relation_type_stats-{split}.png"),
wandb=wandb,
)
# attribute
nouns = ["circle", "cylinder", "square", "box", "object"]
n_stats = get_attribute_statistics(command_structs, include_keywords=nouns)
statistics["shape_stats"] = n_stats
plot_dictionary(
[flatten_dictionary(n_stats)],
title="Shapes",
save_file=os.path.join(output_dir, f"shape_stats-{split}.png"),
wandb=wandb,
)
color_adjectives = ["red", "blue", "green", "yellow"]
c_stats = get_attribute_statistics(command_structs, include_keywords=color_adjectives)
statistics["color_stats"] = c_stats
if len(flatten_dictionary(c_stats)) != 0:
plot_dictionary(
[flatten_dictionary(c_stats)],
title="Colors",
save_file=os.path.join(output_dir, f"color_stats-{split}.png"),
wandb=wandb,
)
size_adjectives = ["big", "small"]
s_stats = get_attribute_statistics(command_structs, include_keywords=size_adjectives)
if len(flatten_dictionary(s_stats)) != 0:
statistics["size_stats"] = s_stats
plot_dictionary(
[flatten_dictionary(s_stats)],
title="Sizes",
save_file=os.path.join(output_dir, f"size_stats-{split}.png"),
wandb=wandb,
)
# second order attribute
color_adjectives = ["red", "blue", "green", "yellow"]
nouns = ["circle", "cylinder", "square", "box", "object"]
c_n_p = product(color_adjectives, nouns)
include_keywords = [" ".join(c_n) for c_n in c_n_p]
c_n_stats = get_attribute_statistics(command_structs, include_keywords=include_keywords)
statistics["color_and_shape_stats"] = c_n_stats
if len(flatten_dictionary(c_n_stats)) != 0:
plot_dictionary(
[flatten_dictionary(c_n_stats)],
title="Colors-Shapes",
save_file=os.path.join(output_dir, f"color+shape_stats-{split}.png"),
wandb=wandb,
)
size_adjectives = ["big", "small"]
nouns = ["circle", "cylinder", "square", "box", "object"]
s_n_p = product(size_adjectives, nouns)
include_keywords = [" ".join(s_n) for s_n in s_n_p]
s_n_stats = get_attribute_statistics(command_structs, include_keywords=include_keywords)
statistics["size_and_shape_stats"] = s_n_stats
if len(flatten_dictionary(s_n_stats)) != 0:
plot_dictionary(
[flatten_dictionary(s_n_stats)],
title="Sizes-Shapes",
save_file=os.path.join(output_dir, f"size+shape_stats-{split}.png"),
wandb=wandb,
)
# third order attribute
size_adjectives = ["big", "small"]
color_adjectives = ["red", "blue", "green", "yellow"]
nouns = ["circle", "cylinder", "square", "box", "object"]
all_p = product(size_adjectives, color_adjectives, nouns)
include_keywords = [" ".join(a) for a in all_p]
all_stats = get_attribute_statistics(command_structs, include_keywords=include_keywords)
statistics["size_and_color_and_shape_stats"] = all_stats
if save_to_disk:
import yaml
with open(os.path.join(output_dir, f"command_struct_only_stats-{split}.yml"), 'w') as yaml_file:
yaml.dump(statistics, yaml_file, default_flow_style=False)
return statistics
def arg_parse():
# This is a single loop to generate the dataset.
n_processes = 1
mode = "all"
n_command_struct = 10000
grid_size = 6
n_object_max = 10
seed = 42
date = "2021-05-07"
per_command_world_retry_max = 200
per_command_world_target_count = 10 # for each command, we target to have 50 shapeWorld!
resumed_from_file_path = ""
is_tensorboard = False
parser = argparse.ArgumentParser(description='ReaSCAN argparse.')
# Experiment management:
parser.add_argument('--n_processes', type=int, default=1,
help='Number of process used to generate the dataset.')
parser.add_argument('--index_start', type=int, default=-1,
help='Number of command sampled from the command population.')
parser.add_argument('--index_end', type=int, default=-1,
help='Number of command sampled from the command population.')
parser.add_argument('--mode', type=str, default="all",
help='mode')
parser.add_argument('--n_command_struct', type=int, default=10000,
help='Number of command sampled from the command population.')
parser.add_argument('--grid_size', type=int, default=6,
help='Grid size of the world.')
parser.add_argument('--n_object_max', type=int, default=10,
help='Number of object at max in the shapeWorld (Note that you may still have more than this number!).')
parser.add_argument('--seed', type=int, default=42,
help='Random seed.')
parser.add_argument('--date', type=str,
help='date')
parser.add_argument('--per_command_world_retry_max', type=int, default=200,
help='How many times you can retry for each world generation.')
parser.add_argument('--per_command_world_target_count', type=int, default=50,
help='The targeted number of world to have per command.')
parser.add_argument("--is_tensorboard",
default=False,
action='store_true',
help="Whether to use tensorboard.")
parser.add_argument("--include_relation_distractor",
default=False,
action='store_true',
help="Whether to use tensorboard.")
parser.add_argument("--include_attribute_distractor",
default=False,
action='store_true',
help="Whether to use tensorboard.")
parser.add_argument("--include_isomorphism_distractor",
default=False,
action='store_true',
help="Whether to use tensorboard.")
parser.add_argument("--include_random_distractor",
default=False,
action='store_true',
help="Whether to use tensorboard.")
parser.add_argument('--full_relation_probability', type=float, default=1.0,
help='Probability of including full relation distractors.')
parser.add_argument('--save_interal', type=int, default=200,
help='Saving intervel in command count.')
parser.add_argument('--command_pattern', type=str, default="p3",
help='What pattern to use, currently, we support p1-p4.')
parser.add_argument('--resumed_from_file_path', type=str, default="",
help='Whether to resume for this file.')
parser.add_argument('--output_dir', type=str, default="../../data-files/ReaSCAN-compositional_splits/",
help='Whether to resume for this file.')
parser.set_defaults(
# Exp management:
n_processes=1,
mode="all",
n_command_struct=10000,
grid_size=6,
n_object_max=10,
seed=42,
date="2021-05-07",
per_command_world_retry_max=200,
per_command_world_target_count=50,
resumed_from_file_path="",
is_tensorboard=False,
output_dir="../../data-files/ReaSCAN-compositional_splits/",
)
try:
get_ipython().run_line_magic('matplotlib', 'inline')
args = parser.parse_args([])
except:
args = parser.parse_args()
return args
def example_classifier(
task_info,
mode="demo",
default_split_prob={
"train": 0.9,
"dev": 0.01,
"test": 0.09,
},
):
"""
This will return the split this data belongs to.
"""
if mode == "demo" or mode == "all":
if random.random() < default_split_prob["train"]:
return "train"
else:
if random.random() < 0.9:
return "test"
else:
return "dev"
else:
# We need to add here logics to determine
# compositional splits!
pass
# Some tips:
# Do not debug in this file, you can simply copy the questionable struct
# to the lightweight demo file, and you can debug there!
if __name__ == "__main__":
# Loading arguments
args = arg_parse()
try:
# get_ipython().run_line_magic('matplotlib', 'inline')
# # Experiment management:
# args.n_processes=1
# args.mode="demo"
# args.n_command_struct=20
# args.grid_size=6
# args.n_object_max=10
# args.seed=42
# args.date="2021-05-07"
# args.per_command_world_retry_max=20
# args.per_command_world_target_count=3
# args.resumed_from_file_path=""
# args.is_tensorboard=True # Let us try this!
# args.output_dir="../../data-files/ReaSCAN-demo/"
# is_jupyter = True
get_ipython().run_line_magic('matplotlib', 'inline')
# Experiment management:
args.n_processes=1
args.mode="train"
args.n_command_struct=675*5
args.grid_size=6
args.n_object_max=10
args.seed=42
args.save_interal = 200
args.date="2021-05-30"
args.per_command_world_retry_max=1000
args.per_command_world_target_count=180
args.resumed_from_file_path=""
args.is_tensorboard=True # Let us try this!
args.output_dir="../../data-files/ReaSCAN-compositional-p3-full-relation/"
is_jupyter = True
args.index_start = -1
args.index_end = -1
except:
is_jupyter = False
loading_p1 = True if args.command_pattern == "p1" else False
p1_exhaustive_verb_adverb = False
loading_p2 = True if args.command_pattern == "p2" else False
loading_p3 = True if args.command_pattern == "p3" else False
loading_p4 = True if args.command_pattern == "p4" else False
save_command_stats = False
save_at_interval = True
save_interal = args.save_interal
# TODO: add these to args.
logging_interval = 1000
# Create output directory if not exists.
pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(args.output_dir, "generator.log"),
)
logger = logging.getLogger(__name__)
logging.getLogger().addHandler(logging.StreamHandler(os.sys.stdout))
logger.info("Generating ReaSCAN with following parameters: ")
logger.info(args)
# This is a single loop to generate the dataset.
# Unpack args into module-level names used throughout the generation loop.
n_processes = args.n_processes
mode = args.mode
n_command_struct = args.n_command_struct
grid_size = args.grid_size
n_object_max = args.n_object_max
seed = args.seed
date = args.date
per_command_world_retry_max = args.per_command_world_retry_max
per_command_world_target_count = args.per_command_world_target_count # for each command, we target to have 50 shapeWorld!
resumed_from_file_path = args.resumed_from_file_path
output_dir = args.output_dir
is_tensorboard = args.is_tensorboard
if is_tensorboard:
    # wandb is imported lazily so the dependency is only needed when logging
    # is actually enabled.
    logger.warning("Enabling wandb for tensorboard logging...")
    import wandb
    run = wandb.init(project="ReaSCAN", entity="wuzhengx")
    run_name = wandb.run.name
    wandb.config.update(args)
else:
    wandb = None
# Seed both RNGs so command/world sampling is reproducible for a given seed.
random.seed(seed)
np.random.seed(seed)
# We also need something to generate generalization
# splits!
# Snapshot of the generation hyper-parameters (kept for record/reference).
params = {
    "n_processes": n_processes,
    "mode": mode,
    "n_command_struct": n_command_struct,
    "grid_size": grid_size,
    "n_object_max": n_object_max,
    "seed": seed,
    "per_command_world_retry_max": per_command_world_retry_max,
    "per_command_world_target_count": per_command_world_target_count,
}
# NOTE(review): `split_percentage` is only defined in the first branch; later
# code (the final save) reads split_percentage["train"], so modes other than
# all/demo/train would raise NameError there — confirm intended modes.
if mode == "all" or mode == "demo" or mode == "train":
    # Meaning we are generating the random ReaSCAN train + dev + test splits!
    logger.warning(f"You are generating data for {mode} splits only!")
    split_percentage = {
        "train": 0.9,
    }
elif mode == "all,noval_1,noval_2,noval_3,noval_4":
    # here we need to define how to check for noval_*
    pass
elif mode == "compositional":
    # Meaning we are generating the random ReaSCAN train + dev + test splits!
    logger.warning("You are generating data for all compositional splits!")
elif mode == "":
    pass # Not implemented!
# Using the full vocabulary.
# Word lists defining the ReaSCAN language: verbs, adverbs, object nouns,
# attributes, and the relative-clause connectives.
intransitive_verbs = ["walk"]
transitive_verbs = ["push", "pull"]
adverbs = ["while zigzagging", "while spinning", "cautiously", "hesitantly"]
nouns = ["circle", "cylinder", "square", "box"]
color_adjectives = ["red", "blue", "green", "yellow"]
size_adjectives = ["big", "small"]
relative_pronouns = ["that is"]
relation_clauses = ["in the same row as",
                    "in the same column as",
                    "in the same color as",
                    "in the same shape as",
                    "in the same size as",
                    "inside of"]
# Project-defined Vocabulary object wrapping the word lists above.
vocabulary = Vocabulary.initialize(intransitive_verbs=intransitive_verbs,
                                   transitive_verbs=transitive_verbs, adverbs=adverbs, nouns=nouns,
                                   color_adjectives=color_adjectives,
                                   size_adjectives=size_adjectives,
                                   relative_pronouns=relative_pronouns,
                                   relation_clauses=relation_clauses)
# test out the object vocab
min_object_size = 1
max_object_size = 4
object_vocabulary = ObjectVocabulary(shapes=vocabulary.get_semantic_shapes(),
                                     colors=vocabulary.get_semantic_colors(),
                                     min_size=min_object_size, max_size=max_object_size)
# Generating all the core command structs.
grammer = Grammer(vocabulary)
# Bootup our simulator.
simulator = Simulator(
    object_vocabulary, vocabulary,
    grid_size=grid_size,
    n_object_max=n_object_max,
)
command_structs = []
logger.info("Finished loading required modules...")
# Sampling all the possible command score structs.
# NOTE(review): the four `loading_p*` branches below are near-identical and
# could be factored into a helper; left as-is because `grammer_pattern` leaks
# out of these blocks and is referenced by later logging code.
if loading_p4:
    # Currently, we hard-code the pattern!
    # p4: target object constrained by three relative clauses.
    grammer_pattern = '$OBJ_0 ^ $OBJ_1 & $OBJ_2 & $OBJ_3'
    logger.info(f"Including pattern:= {grammer_pattern}...")
    # Sampling relations
    relations = grammer.sample_object_relation_grammer(
        '$OBJ_0',
        grammer.build_dependency_graph(grammer_pattern))
    for relation in relations:
        obj_pattern_map = relation[0]
        rel_map = relation[1]
        grammer_bindings = grammer.grounding_grammer_with_vocabulary(grammer_pattern, obj_pattern_map, rel_map)
        for obj_map in grammer_bindings:
            # here, we also sample the verb and adverb bindings!
            # "" means "no adverb" and is one of the uniform choices.
            adverb_enhance_list = vocabulary.get_adverbs()
            adverb_enhance_list += [""]
            command_struct = {
                "obj_pattern_map" : obj_pattern_map,
                "rel_map" : rel_map,
                "obj_map" : obj_map,
                "grammer_pattern" : grammer_pattern,
                "adverb" : random.choice(adverb_enhance_list),
                "verb" : random.choice(vocabulary.get_transitive_verbs() + vocabulary.get_intransitive_verbs()),
            }
            command_structs += [command_struct]
if loading_p3:
    # Currently, we hard-code the pattern!
    # p3: target object constrained by two relative clauses.
    grammer_pattern = '$OBJ_0 ^ $OBJ_1 & $OBJ_2'
    logger.info(f"Including pattern:= {grammer_pattern}...")
    # Sampling relations
    relations = grammer.sample_object_relation_grammer(
        '$OBJ_0',
        grammer.build_dependency_graph(grammer_pattern))
    for relation in relations:
        obj_pattern_map = relation[0]
        rel_map = relation[1]
        grammer_bindings = grammer.grounding_grammer_with_vocabulary(grammer_pattern, obj_pattern_map, rel_map)
        for obj_map in grammer_bindings:
            # here, we also sample the verb and adverb bindings!
            adverb_enhance_list = vocabulary.get_adverbs()
            adverb_enhance_list += [""]
            command_struct = {
                "obj_pattern_map" : obj_pattern_map,
                "rel_map" : rel_map,
                "obj_map" : obj_map,
                "grammer_pattern" : grammer_pattern,
                "adverb" : random.choice(adverb_enhance_list),
                "verb" : random.choice(vocabulary.get_transitive_verbs() + vocabulary.get_intransitive_verbs()),
            }
            command_structs += [command_struct]
if loading_p2:
    # p2: target object constrained by a single relative clause.
    grammer_pattern = '$OBJ_0 ^ $OBJ_1'
    logger.info(f"Including pattern:= {grammer_pattern}...")
    # Sampling relations
    relations = grammer.sample_object_relation_grammer(
        '$OBJ_0',
        grammer.build_dependency_graph(grammer_pattern))
    for relation in relations:
        obj_pattern_map = relation[0]
        rel_map = relation[1]
        grammer_bindings = grammer.grounding_grammer_with_vocabulary(grammer_pattern, obj_pattern_map, rel_map)
        for obj_map in grammer_bindings:
            # here, we also sample the verb and adverb bindings!
            adverb_enhance_list = vocabulary.get_adverbs()
            adverb_enhance_list += [""]
            command_struct = {
                "obj_pattern_map" : obj_pattern_map,
                "rel_map" : rel_map,
                "obj_map" : obj_map,
                "grammer_pattern" : grammer_pattern,
                "adverb" : random.choice(adverb_enhance_list),
                "verb" : random.choice(vocabulary.get_transitive_verbs() + vocabulary.get_intransitive_verbs()),
            }
            command_structs += [command_struct]
if loading_p1:
    # p1: simple (gSCAN-style) command, no relative clause. Unlike p2-p4,
    # every verb/adverb combination is enumerated exhaustively.
    p1_exhaustive_verb_adverb = True
    # for gSCAN command, we don't need to undersample, they are small!
    grammer_pattern = '$OBJ_0'
    logger.info(f"Including pattern:= {grammer_pattern}...")
    # Sampling relations
    relations = grammer.sample_object_relation_grammer(
        '$OBJ_0',
        grammer.build_dependency_graph(grammer_pattern))
    for relation in relations:
        obj_pattern_map = relation[0]
        rel_map = relation[1]
        grammer_bindings = grammer.grounding_grammer_with_vocabulary(grammer_pattern, obj_pattern_map, rel_map)
        for obj_map in grammer_bindings:
            if p1_exhaustive_verb_adverb:
                for adverb in vocabulary.get_adverbs() + [""]:
                    for verb in vocabulary.get_transitive_verbs() + vocabulary.get_intransitive_verbs():
                        # here, we also sample the verb and adverb bindings!
                        command_struct = {
                            "obj_pattern_map" : obj_pattern_map,
                            "rel_map" : rel_map,
                            "obj_map" : obj_map,
                            "grammer_pattern" : grammer_pattern,
                            "adverb" : adverb,
                            "verb" : verb,
                        }
                        command_structs += [command_struct]
# We only sample these command!
"""
WARNING: beaware that not all command struct can
be sampled for world-command pair! They may or
may not fail.
"""
# Shuffle the full command pool and keep either the first `n_command_struct`
# entries or one shard [index_start:index_end] for multiprocess generation.
under_sample = True
if under_sample:
    sampled_command_struct = []
    random.shuffle(command_structs)
    # NOTE(review): if n_command_struct == -1 AND no sharding indices are
    # given, sampled_command_struct stays empty — confirm this is intended.
    if n_command_struct != -1:
        sampled_command_struct = command_structs[:n_command_struct]
    if args.index_start == -1 or args.index_end == -1:
        pass
    else:
        # we only look at one shard! this is for multiprocess
        logger.info(f"WARNING: contine with sharding: start at {args.index_start}; end at {args.index_end}")
        sampled_command_struct = command_structs[args.index_start:args.index_end]
    logger.info(f"Sampled {len(sampled_command_struct)} from {len(command_structs)} core command structs for pattern={grammer_pattern}.")
logger.info(f"Finished sampling core command structs with total {len(sampled_command_struct)}...")
command_struct_file_path = os.path.join(args.output_dir, f"command_struct-{args.mode}.txt")
# rel_map is an (ordered) mapping; convert it to a list of pairs so the
# struct can be serialized with json.dump below.
formatted_sampled_command_struct = []
for command_struct in sampled_command_struct:
    formatted_command_struct = {
        "obj_pattern_map" : command_struct["obj_pattern_map"],
        "rel_map" : [(k, v) for k, v in command_struct["rel_map"].items()],
        "obj_map" : command_struct["obj_map"],
        "grammer_pattern" : command_struct["grammer_pattern"],
        "adverb" : command_struct["adverb"],
        "verb" : command_struct["verb"],
    }
    formatted_sampled_command_struct += [formatted_command_struct]
# dump to the disk.
with open(command_struct_file_path, "w") as fd:
    json.dump(formatted_sampled_command_struct, fd, indent=4)
logger.info(f"Saved command struct to {command_struct_file_path} for later use...")
# print out quick stats on how many command per pattern!
per_pattern_command_count = {}
for command_struct in sampled_command_struct:
    grammer_pattern = command_struct["grammer_pattern"]
    if grammer_pattern in per_pattern_command_count.keys():
        per_pattern_command_count[grammer_pattern] += 1
    else:
        per_pattern_command_count[grammer_pattern] = 1
logger.info(f"Counts per command pattern: ")
logger.info(per_pattern_command_count)
# From the struct, let us sample shape world.
"""
We just need a couple more steps beyond this point:
(1) Sample a world
(2) Making sure it is valid
(3) Construct the command, providing determiners
(4) Generate action sequences to the target
(5) Get all the action related metadata as gSCAN
(6) Save it to per command example
"""
# We need a way to index the sampled command.
sampled_command_struct_indexed = OrderedDict({})
global_command_struct_index = 0
for command_struct in sampled_command_struct:
    sampled_command_struct_indexed[global_command_struct_index] = command_struct
    global_command_struct_index += 1
# '$OBJ_0' is always the referred (target) object of a command.
root = "$OBJ_0"
per_command_world_counts = OrderedDict({})
# NOTE(review): only a "train" bucket is created here, yet the generation
# loop appends to created_examples_by_splits[args.mode] — modes "demo"/"all"
# would hit a KeyError there; confirm expected usage is mode == "train".
if mode == "demo" or mode == "all" or mode == "train":
    created_examples_by_splits = OrderedDict({
        "train" : [],
    })
else:
    pass
shaperized_command_struct = []
# Per-command set of world hashes, used to reject duplicate sampled worlds.
per_command_world_unique_check = OrderedDict({})
# Some global control for data quality control.
global_step = 0
success_step = 0
# Distractor info logs.
d_full_relation_count = 0
d_relation_count = 0
d_attribute_count = 0
d_iso_count = 0
d_random_count = 0
logger.info(f"Started to generate the dataset...")
# Main generation loop: for each command struct, repeatedly sample candidate
# worlds until `per_command_world_target_count` valid command/world pairs are
# collected, with at most `per_command_world_retry_max` retries per pair.
for command_struct_index, command_struct in sampled_command_struct_indexed.items():
    logger.info(f"Generating for command struct (seed={seed}): {command_struct_index+1}/{len(sampled_command_struct_indexed)}...")
    per_command_world_counts[command_struct_index] = 0 # 0 world for each command in the beginning!
    per_command_world_unique_check[command_struct_index] = set([])
    obj_pattern_map = command_struct["obj_pattern_map"]
    rel_map = command_struct["rel_map"]
    obj_map = command_struct["obj_map"]
    grammer_pattern = command_struct["grammer_pattern"]
    verb = command_struct["verb"]
    adverb = command_struct["adverb"]
    # This is the target world number generated for this command
    for n_world_try in range(per_command_world_target_count):
        # How many time we need to retry before we give up?
        at_least_success = False
        for n_retry in range(per_command_world_retry_max):
            global_step += 1
            # Guard against division by zero before the first success.
            if success_step == 0:
                denom = 1
            else:
                denom = success_step
            # Running ratios of each distractor type over successful examples.
            d_full_relation_ratio = 1.0*d_full_relation_count/denom
            d_relation_ratio = 1.0*d_relation_count/denom
            d_attribute_ratio = 1.0*d_attribute_count/denom
            d_iso_ratio = 1.0*d_iso_count/denom
            d_random_ratio = 1.0*d_random_count/denom
            global_success_ratio = 1.0*success_step/global_step
            # logging some very useful information to wandb if avaliable!
            if is_tensorboard:
                if (global_step%logging_interval) == 0:
                    wandb.log({'global_success_ratio': global_success_ratio, 'global_step': global_step})
                    wandb.log({'current_example_count': success_step, 'global_step': global_step})
                    wandb.log({'d_full_relation_ratio': d_full_relation_ratio, 'global_step': global_step})
                    wandb.log({'d_relation_ratio': d_relation_ratio, 'global_step': global_step})
                    wandb.log({'d_attribute_ratio': d_attribute_ratio, 'global_step': global_step})
                    wandb.log({'d_iso_ratio': d_iso_ratio, 'global_step': global_step})
                    wandb.log({'d_random_ratio': d_random_ratio, 'global_step': global_step})
            else:
                # Without wandb, log the same stats (10x less frequently).
                if (global_step%(logging_interval*10)) == 0:
                    logger.info({'global_success_ratio': global_success_ratio, 'global_step': global_step})
                    logger.info({'current_example_count': success_step, 'global_step': global_step})
                    logger.info({'d_full_relation_ratio': d_full_relation_ratio, 'global_step': global_step})
                    logger.info({'d_relation_ratio': d_relation_ratio, 'global_step': global_step})
                    logger.info({'d_attribute_ratio': d_attribute_ratio, 'global_step': global_step})
                    logger.info({'d_iso_ratio': d_iso_ratio, 'global_step': global_step})
                    logger.info({'d_random_ratio': d_random_ratio, 'global_step': global_step})
            # NOTE(review): the "demo" branch and the else branch below are
            # currently identical — confirm whether demo was meant to use
            # reduced distractor settings.
            if mode == "demo":
                sampled_world = simulator.sample_situations_from_grounded_grammer(
                    copy.deepcopy(grammer_pattern),
                    copy.deepcopy(obj_pattern_map),
                    copy.deepcopy(rel_map),
                    copy.deepcopy(obj_map),
                    is_plot=False,
                    include_relation_distractor=args.include_relation_distractor,
                    include_attribute_distractor=args.include_attribute_distractor,
                    include_isomorphism_distractor=args.include_isomorphism_distractor,
                    include_random_distractor=args.include_random_distractor,
                    full_relation_probability=args.full_relation_probability,
                    debug=False
                ) # This is the minimum settings! You need to turn on attribute always!
            else:
                # Sample a shapeWorld!
                sampled_world = simulator.sample_situations_from_grounded_grammer(
                    copy.deepcopy(grammer_pattern),
                    copy.deepcopy(obj_pattern_map),
                    copy.deepcopy(rel_map),
                    copy.deepcopy(obj_map),
                    is_plot=False,
                    include_relation_distractor=args.include_relation_distractor,
                    include_attribute_distractor=args.include_attribute_distractor,
                    include_isomorphism_distractor=args.include_isomorphism_distractor,
                    include_random_distractor=args.include_random_distractor,
                    full_relation_probability=args.full_relation_probability, # ReaSCAN Special: 15 distractors!
                    debug=False
                )
            # Validate the world is valid!
            # Build a graph over the sampled world and one over the abstract
            # pattern, then check the pattern resolves to a unique target.
            graph = ReaSCANGraph(
                objects=sampled_world["obj_map"],
                object_patterns=sampled_world["obj_pattern_map"],
                vocabulary=vocabulary,
                positions=sampled_world["pos_map"],
                referred_object=sampled_world["referred_obj"],
                debug=False
            )
            pattern_graph = ReaSCANGraph(
                objects=obj_map,
                object_patterns=None,
                vocabulary=vocabulary,
                relations=rel_map,
                referred_object='$OBJ_0',
                debug=False
            )
            potential_referent_target = graph.find_referred_object_super_fast(
                pattern_graph, referred_object='$OBJ_0',
                debug=False
            )
            # Save the result if the world is valid!
            # This may be to strict, but it ensures 100% correct!
            if len(potential_referent_target) == 1 and '$OBJ_0' in potential_referent_target:
                # A quick world repeat check!
                hash_world_str = hashlib.md5(str(sampled_world["situation"].to_representation()).encode('utf-8')).hexdigest()
                if hash_world_str not in per_command_world_unique_check[command_struct_index]:
                    per_command_world_unique_check[command_struct_index].add(hash_world_str)
                else:
                    continue # This is highly unlikely, but just to prevent!
                # Form the command with grounded determiners!
                obj_determiner_map = graph.find_determiners(
                    pattern_graph,
                    referred_object='$OBJ_0',
                    debug=False,
                )
                # we don't check this for P1 and P2?
                # valid_determiner = True
                # for k, v in obj_determiner_map.items():
                #     if k != '$OBJ_0':
                #         if v != "a":
                #             valid_determiner = False
                #             break
                # if not valid_determiner:
                #     continue # we should abort and resample!
                at_least_success = True
                success_step += 1
                command_str = grammer.repre_str_command(
                    grammer_pattern, rel_map, obj_map,
                    obj_determiner_map,
                    verb,
                    adverb,
                )
                # Form the golden label for the action list!
                is_transitive = False
                if verb in simulator.vocabulary.get_transitive_verbs():
                    is_transitive = True
                # Direct walk.
                action = "walk" # this is definit!
                primitive_command = simulator.vocabulary.translate_word(action)
                target_position = sampled_world["situation"].target_object.position
                simulator._world.go_to_position(
                    position=target_position, manner=adverb,
                    primitive_command=primitive_command
                )
                # Object actions.
                if is_transitive:
                    semantic_action = simulator.vocabulary.translate_word(verb)
                    simulator._world.move_object_to_wall(action=semantic_action, manner=adverb)
                target_commands, _ = simulator._world.get_current_observations()
                # Summarize which relation distractors were actually placed.
                has_relation_distractor = False
                full_relation_distractor = True
                for rel_bool in sampled_world["distractor_switch_map"]["relation"]:
                    if rel_bool:
                        has_relation_distractor = True
                    else:
                        full_relation_distractor = False
                # Save all relevant information for a task.
                task_struct = OrderedDict({
                    "command": ",".join(command_str.split(" ")),
                    "grammer_pattern": grammer_pattern,
                    "meaning": ",".join(command_str.split(" ")),
                    "derivation": grammer_pattern,
                    "situation": sampled_world["situation"].to_representation(),
                    "target_commands": ",".join(target_commands),
                    "verb_in_command": verb,
                    "adverb_in_command": adverb,
                    "referred_target": obj_map["$OBJ_0"],
                    "object_pattern_map": obj_pattern_map,
                    "relation_map": [(k, v) for k, v in rel_map.items()],
                    "object_expression": obj_map,
                    "n_object": len(sampled_world["obj_map"]),
                    "n_distractor": len(sampled_world["obj_map"])-len(obj_map),
                    "full_relation_distractor": full_relation_distractor,
                    "has_relation_distractor": has_relation_distractor,
                    "has_attribute_distractor": sampled_world["distractor_switch_map"]["attribute"],
                    "has_isomorphism_distractor": sampled_world["distractor_switch_map"]["isomorphism"],
                    "has_random_distractor": True if sampled_world["n_random_distractor"] != -1 else False,
                    "n_random_distractor": sampled_world["n_random_distractor"] if sampled_world["n_random_distractor"] != -1 else 0,
                    "relation_distractor_metadata": sampled_world["relation_distractor_metadata"],
                    "attribute_distractor_metadata": sampled_world["attribute_distractor_metadata"],
                    "isomorphism_distractor_metadata": sampled_world["isomorphism_distractor_metadata"],
                    "random_distractor_metadata": sampled_world["random_distractor_metadata"],
                })
                # Record distractor related info
                if task_struct["full_relation_distractor"]:
                    d_full_relation_count += 1
                if task_struct["has_relation_distractor"]:
                    d_relation_count += 1
                if task_struct["has_attribute_distractor"]:
                    d_attribute_count += 1
                if task_struct["has_isomorphism_distractor"]:
                    d_iso_count += 1
                if task_struct["n_random_distractor"]:
                    d_random_count += 1
                # Here, we decide which split we put the example into!
                split = args.mode
                created_examples_by_splits[split].append(task_struct)
                per_command_world_counts[command_struct_index] += 1
                break # break the retry loop!
        if not at_least_success:
            logger.info(f"WARNING: the success rate for this command is close to 0.0%, skipping...")
            break # success rate for this command is ~= 0.0%, let us directly skip
    # Periodic checkpoint: dump everything generated so far to disk.
    # NOTE(review): the inner loop below reuses the name `command_struct_index`
    # and shadows the outer loop variable; harmless today (nothing reads it
    # afterwards in this iteration) but fragile — worth renaming.
    if save_at_interval and (command_struct_index+1)% save_interal == 0:
        logger.info(f"Saving data files and statistics to {args.output_dir} for checkpoints...")
        # Now, we need to save data into the folder
        # along with possible statistics.
        to_save_command_struct = []
        per_command_count = []
        for command_struct_index, count in per_command_world_counts.items():
            per_command_count += [count]
            if count >= 1:
                to_save_command_struct.append(sampled_command_struct_indexed[command_struct_index])
        if save_command_stats:
            _ = get_command_struct_statistics(
                to_save_command_struct, run_name=f"ReaSCAN-{mode}", date=args.date,
                split=mode,
                compositional_split=False,
                n_sample=-1,
                output_dir=args.output_dir,
                save_to_disk=True if args.output_dir != "" else False,
                wandb=wandb
            )
        # wandb.log({"per_command_world_count": wandb.Histogram(per_command_count)})
        data_file_path = os.path.join(args.output_dir, f"data-{args.mode}.txt")
        if mode == "demo" or mode == "all" or mode == "train":
            logger.info(f"total example count={success_step}...")
            dataset_representation = {
                "grid_size": args.grid_size,
                "type_grammar": "ReaSCAN-Grammer",
                "min_object_size": 1,
                "max_object_size": 4,
                "percentage_train": split_percentage["train"],
                "examples": created_examples_by_splits,
                "intransitive_verbs": intransitive_verbs,
                "transitive_verbs": transitive_verbs,
                "adverbs": adverbs,
                "nouns": nouns,
                "color_adjectives": color_adjectives,
                "size_adjectives": size_adjectives,
                "relative_pronouns": relative_pronouns,
                "relation_clauses": relation_clauses,
            }
            # dump to the disk.
            with open(data_file_path, "w") as fd:
                json.dump(dataset_representation, fd, indent=4)
        else:
            pass
# Last round of saving!
# Same logic as the interval checkpoint above, run once after the main loop
# so the final dataset on disk contains every generated example.
logger.info(f"Saving FINAL data files and statistics to {args.output_dir}...")
# Now, we need to save data into the folder
# along with possible statistics.
to_save_command_struct = []
per_command_count = []
for command_struct_index, count in per_command_world_counts.items():
    per_command_count += [count]
    if count >= 1:
        to_save_command_struct.append(sampled_command_struct_indexed[command_struct_index])
if save_command_stats:
    _ = get_command_struct_statistics(
        to_save_command_struct, run_name=f"ReaSCAN-{mode}", date=args.date,
        split=mode,
        compositional_split=False,
        n_sample=-1,
        output_dir=args.output_dir,
        save_to_disk=True if args.output_dir != "" else False,
        wandb=wandb
    )
# wandb.log({"per_command_world_count": wandb.Histogram(per_command_count)})
data_file_path = os.path.join(args.output_dir, f"data-{args.mode}.txt")
if mode == "demo" or mode == "all" or mode == "train":
    logger.info(f"total example count={success_step}...")
    # Top-level dataset dict mirrors the gSCAN data format.
    dataset_representation = {
        "grid_size": args.grid_size,
        "type_grammar": "ReaSCAN-Grammer",
        "min_object_size": 1,
        "max_object_size": 4,
        "percentage_train": split_percentage["train"],
        "examples": created_examples_by_splits,
        "intransitive_verbs": intransitive_verbs,
        "transitive_verbs": transitive_verbs,
        "adverbs": adverbs,
        "nouns": nouns,
        "color_adjectives": color_adjectives,
        "size_adjectives": size_adjectives,
        "relative_pronouns": relative_pronouns,
        "relation_clauses": relation_clauses,
    }
    # dump to the disk.
    with open(data_file_path, "w") as fd:
        json.dump(dataset_representation, fd, indent=4)
else:
    pass
logger.info("==FINISH==")
if args.is_tensorboard:
    # end wandb
    wandb.finish()
```
| github_jupyter |
# Data Analysis
Here we need a `.csv` file in order to do the desired analysis.
```
import json
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import seaborn as sns
# Load the merged datalogger export and normalize its timestamp columns.
df = pd.read_csv('mergedDatalogger_formatted_data_nov1722.csv')
df.head()
df = pd.read_csv('mergedDatalogger_formatted_data_nov1722.csv')
df["datetime"] = pd.to_datetime(df["datetime"])
# df = df.set_index('datetime')
# Drop the redundant raw string column; keep the parsed 'datetime' one.
del df["Datetime"]
# Round to the nearest X minutes (X according to the video sampling frequency).
df["approx datetime"] = df["datetime"].round('1min')
del df["Sigma T[sT]"]
# df = df.sort_index()
df.head(10)
```
## Gráficas Datos Datalogger
```
# Inspect available columns before plotting.
df.columns
# Reload with the raw 'Datetime' column parsed into a proper datetime dtype.
df = pd.read_csv('mergedDatalogger_formatted_data_nov1722.csv')
df["datetime"] = pd.to_datetime(df["Datetime"])
# Scatter the four sensor channels against time in one stacked figure.
# NOTE: some CSV column names carry trailing spaces (e.g. 'pH ').
plt.figure(figsize=(20,15))
plt.subplot(4,1,1)
plt.scatter(df['datetime'], df['D.O.[mg/L]'], s=0.1)
plt.subplot(4,1,2)
plt.scatter(df['datetime'], df['pH '], s=0.1)
plt.subplot(4,1,3)
plt.scatter(df['datetime'], df['Temp'], s=0.1)
plt.subplot(4,1,4)
plt.scatter(df['datetime'], df['EC Abs'], s=0.1)
# df = df.set_index('datetime')
# df[['Temp', 'pH ', 'D.O.[mg/L]', 'EC Abs']].plot(subplots=True, figsize=(10,10))
# Same channels for the November 17 export, using pandas' built-in plotting.
df = pd.read_csv('datalogger_formatted_data_nov17.csv')
df["datetime"] = pd.to_datetime(df["Datetime"])
df = df.set_index('datetime')
df[['Temp', 'pH ', 'D.O.[mg/L]', 'EC Abs']].plot(subplots=True, figsize=(10,10))
# Load the November 22 datalogger export, index it by timestamp, and plot
# the four sensor channels; then report the time span covered by the data.
df = pd.read_csv('datalogger_formatted_data_nov22.csv')
df["datetime"] = pd.to_datetime(df["Datetime"])
df = df.set_index('datetime')
df[['Temp', 'pH ', 'D.O.[mg/L]', 'EC Abs uS/cm']].plot(subplots=True, figsize=(10,10))
# BUG FIX: after set_index('datetime') there is no 'datetime' column anymore,
# so df.head(1)['datetime'] raised a KeyError. Read the bounds from the
# (sorted-by-file-order) DatetimeIndex instead.
start_time = df.index[0]
end_time = df.index[-1]
print(f"Data starts at: {start_time}")
print(f"Data ends at: {end_time}")
#understanding correlation
# numeric_only=True keeps .corr() working on pandas >= 2.0 when non-numeric
# columns (e.g. the raw 'Datetime' strings) are still present.
plt.figure(figsize = (15,9))
sns.heatmap(df.corr(numeric_only=True), annot = True)
```
## Strongly Correlated Variables
```
# Pairwise scatterplots for the strongly correlated variable pairs found in
# the heatmap above.
# NOTE(review): these column names ('Dissolved Oxygen', 'Humidity', ...) do
# not appear in the frames loaded earlier in this notebook — presumably `df`
# is redefined in a cell not shown here; verify before re-running top-to-bottom.
sns.scatterplot(x=df["Dissolved Oxygen"], y=df["Ambient Temperature"], data=df)
sns.scatterplot(x=df["Humidity"], y=df["Ambient Temperature"], data=df)
sns.scatterplot(x=df["Humidity"], y=df["Dissolved Oxygen"], data=df)
scatter = sns.scatterplot(x=df["Water Temperature"], y=df["Total Dissolved Solids"], data=df)
# Clip the y-axis to hide extreme TDS outliers.
scatter.set_ylim(top=400);
```
## Weakly/Not at all Correlated Variables
```
# Example of a weakly/un-correlated pair; x-axis clipped to the region of
# interest so the cloud structure is visible.
scatter = sns.scatterplot(x=df["Water Level"], y=df["Rain"], data=df)
scatter.set_xlim(left=25, right=40)
```
# Merging Video Data with Physicochemical Vars
```
# Reload the physicochemical data and round timestamps to the minute so the
# rows can be joined with the per-video measurements taken at the same time.
df = pd.read_csv('mergedDatalogger_formatted_data_nov1722.csv')
df["datetime"] = pd.to_datetime(df["datetime"])
del df["Datetime"]
# Round to the nearest X minutes (X according to the video sampling frequency).
df["approx datetime"] = df["datetime"].round('1min')
del df["Sigma T[sT]"]
df.head(10)
# Load the video-derived features and round their timestamps the same way.
video_df = pd.read_csv('video_data_Nov1718_Nov2223.csv')
video_df["datetime"] = pd.to_datetime(video_df["timestamp"])
del video_df["timestamp"]
del video_df['Video Index']
video_df["approx datetime"] = video_df["datetime"].round('1min')
video_df = video_df.sort_values('datetime', ignore_index=True)
video_df.head()
df.head()
# Inner-join sensor rows and video rows that fall in the same minute.
merged_df = pd.merge(df, video_df, on="approx datetime")
del merged_df['datetime_x'] # (Original formatted_data_ timestamp) Column with the same name
del merged_df['datetime_y'] # (Original video_data timestamp) Column with the same name
merged_df.head()
#understanding correlation
# numeric_only=True keeps .corr() working on pandas >= 2.0 (the merged frame
# still contains the non-numeric 'approx datetime' column).
plt.figure(figsize = (15,9))
sns.heatmap(merged_df.corr(numeric_only=True), annot = True)
# FIX: pass the merged frame as `data` — the x/y vectors come from merged_df,
# not from the raw sensor frame `df`.
scatter = sns.scatterplot(x=merged_df["Average Pairwise Distance"], y=merged_df["Dissolved Oxygen"], data=merged_df)
# scatter.set_ylim(top=400);
```
| github_jupyter |
# Forecasting with sktime - appendix: forecasting, supervised regression, and pitfalls in confusing the two
This notebook provides some supplementary explanation about the relation between forecasting as implemented in `sktime`, and the very common supervised prediction tasks as supported by `scikit-learn` and similar toolboxes.
Key points discussed in this notebook:
* forecasting is not the same as supervised prediction
* even though forecasting can be "solved" by algorithms for supervised prediction, this is indirect and requires careful composition
* from an interface perspective, this is correctly formulated as "reduction", i.e., use of a supervised predictor as a component within a forecaster
* there are a number of pitfalls if this is manually done - such as, over-optimistic performance evaluation, information leakage, or "predicting the past" type errors
```
# general imports
import numpy as np
import pandas as pd
```
## The pitfalls of mis-diagnosing forecasting as supervised regression
A common mistake is to mis-identify a forecasting problem as supervised regression - after all, in both we predict numbers, so surely this must be the same thing?
Indeed we predict numbers in both, but the set-up is different:
* in supervised regression, we predict *label/target variables* from *feature variables*, in a cross-sectional set-up. This is after training on label/feature examples.
* in forecasting, we predict *future values* from *past values*, of *the same variable*, in a temporal/sequential set-up. This is after training on the past.
In the common data frame representation:
* in supervised regression, we predict entries in a column from other columns. For this, we mainly make use of the statistical relation between those columns, leart from examples of complete rows. The rows are all assumed exchangeable.
* in forecasting, we predict new rows, assuming temporal ordering in the rows. For this, we mainly make use of the statistical relation between previous and subsequent rows, learnt from the example of the observed sequence of rows. The rows are not exchangeable, but in temporal sequence.
TODO: add a nice picture on what is predicted from what, arrows and all. Contributions are welcome.
### Pitfall 1: over-optimism in performance evaluation, false confidence in "broken" forecasters
Confusing the two tasks may lead to information leakage, and over-optimistic performance evaluation. This is because in supervised regression the ordering of rows does not matter, and train/test split is usually performed uniformly. In forecasting, the ordering does matter, both in training and in evaluation.
As subtle as it seems, this may have major practical consequences - since it can lead to the mistaken belief that a "broken" method is performant, which can cause damage to health, property, and other assets in real-life deployment.
The example below shows "problematic" performance estimation, when mistakenly using the regression evaluation workflow for forecasting.
```
from sklearn.model_selection import train_test_split
from sktime.datasets import load_airline
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.utils.plotting import plot_series

y = load_airline()
# DELIBERATELY WRONG (for illustration): train_test_split shuffles by default,
# so future observations leak into the training set. The plot below shows
# train and test points interleaved in time.
y_train, y_test = train_test_split(y)
plot_series(y_train.sort_index(), y_test.sort_index(), labels=["y_train", "y_test"]);
```
This leads to leakage:
> The data you are using to train a machine learning algorithm happens to have the information you are trying to predict.
But `train_test_split(y, shuffle=False)` works, which is what `temporal_train_test_split(y)` does in `sktime`:
```
# Correct approach: split chronologically so the test set lies strictly
# after the training set in time.
y_train, y_test = temporal_train_test_split(y)
plot_series(y_train, y_test, labels=["y_train", "y_test"]);
```
### Pitfall 2: obscure data manipulations, brittle boilerplate code to apply regressors
It is common practice to apply supervised regressors after transforming the data for forecasting, through lagging - for example, in auto-regressive reduction strategies.
Two important pitfalls appear right at the start:
* a lot of boilerplate code has to be written to transform the data to make it ready for fitting - this is highly error prone
* there are a number of implicit hyper-parameters here, such as window and lag size. If done without caution, these are not explicit or tracked in the experiment, which can lead to "p-value hacking".
Below is an example of such boilerplate code to demonstrate this. The code is closely modelled on the R code used in the [M4 competition](https://github.com/Mcompetitions/M4-methods):
```
# suppose we want to predict 3 years ahead (36 monthly steps)
fh = np.arange(1, 37)
# slightly modified code from the M4 competition
def split_into_train_test(data, in_num, fh):
    """
    Splits the series into train and test sets.
    Each step takes multiple points as inputs
    :param data: an individual TS
    :param fh: number of out of sample points
    :param in_num: number of input points for the forecast
    :return: x_train, y_train, x_test, y_test
    """
    # Hold out the last `fh` observations; the test segment keeps `in_num`
    # extra points so its first window has a full history.
    train = np.asarray(data[:-fh])
    test = np.asarray(data[-(fh + in_num):])

    def _lagged_windows(series):
        # Feature row i is the window series[i : i+in_num]; its target is
        # the observation immediately after that window.
        n_rows = len(series) - in_num
        features = np.stack([series[i:i + in_num] for i in range(n_rows)])
        targets = np.asarray(series[in_num:])
        return features, targets

    x_train, y_train = _lagged_windows(train)
    x_test, y_test = _lagged_windows(test)
    return x_train, y_train, x_test, y_test
# here we split the time index, rather than the actual values,
# to show how we split the windows
# (each feature row is a run of 10 consecutive integer positions).
feature_window, target_window, _, _ = split_into_train_test(
    np.arange(len(y)), 10, len(fh)
)
```
To better understand the prior data transformation, we can look at how we can split the training series into windows. Here we show the generated windows expressed as integer indices:
```
# First few windows and their targets, expressed as integer indices.
feature_window[:5, :]
target_window[:5]
# now we can split the actual values of the time series
x_train, y_train, x_test, y_test = split_into_train_test(y.values, 10, len(fh))
print(x_train.shape, y_train.shape)
# Fit an off-the-shelf tabular regressor on the lagged windows.
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(x_train, y_train)
```
To reiterate the potential pitfalls here:
> The manual route requires a lot of hand-written code which is often error-prone, not modular and not tuneable.
> These steps involve a number of implicit hyper-parameters:
> * the way you slice the time series into windows (e.g. the window length)
> * the way you generate forecasts (recursive strategy, direct strategy, other hybrid strategies)
### Pitfall 3: Given a fitted regression algorithm, how can we generate forecasts?
The next important pitfall comes at the end:
if making predictions along the "manual route" for supervised regressors, the supervised regressor's outputs have to be transformed back into forecasts. This is easily forgotten, and invites errors in forecasts and evaluation (see pitfall no.1) - especially, if one does not cleanly keep track of which data is known at what time, or how to invert the transformation made in fitting.
A naive user might now proceed like this:
```
# DELIBERATELY FLAWED (for illustration): predicting x_test row-by-row makes
# 36 one-step-ahead forecasts from observed data, not a true 36-step forecast.
print(x_test.shape, y_test.shape)
# add back time index to y_test
y_test = pd.Series(y_test, index=y.index[-len(fh) :])
y_pred = model.predict(x_test)
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
mean_absolute_percentage_error(
    y_test, pd.Series(y_pred, index=y_test.index), symmetric=False
)
```
So easy, so wrong ... but what's the problem here? It's a bit subtle and not easy to spot:
> We actually don't make a multi-step-ahead forecast up to the 36th step ahead. Instead, we make 36 single-step-ahead forecasts always using the most recent data. But that's a solution to a different learning task!
To fix this problem, we could write some code to do this recursively as in the M4 competition:
```
# slightly modified code from the M4 study
# Recursive strategy: forecast one step, feed the prediction back into the
# window, and repeat for len(fh) steps.
predictions = []

# seed the recursion with the last training window (2d array for sklearn)
last_window = x_train[-1, :].reshape(1, -1)
last_prediction = model.predict(last_window)[0]  # take value from array

for i in range(len(fh)):
    # append prediction
    predictions.append(last_prediction)

    # update last window using previously predicted value:
    # shift the window left by one and write the prediction into the last slot
    last_window[0] = np.roll(last_window[0], -1)
    last_window[0, -1] = last_prediction

    # predict next step ahead
    last_prediction = model.predict(last_window)[0]

y_pred_rec = pd.Series(predictions, index=y_test.index)

from sktime.performance_metrics.forecasting import mean_absolute_percentage_error

mean_absolute_percentage_error(
    y_test, pd.Series(y_pred_rec, index=y_test.index), symmetric=False
)
```
To summarize the potential pitfalls here:
> Obtaining regressor predictions and converting them back into forecasts is non-trivial and error prone:
> * some boilerplate code needs to be written, which just as in pitfall no.2 introduces potential for problems
> * it isn't exactly obvious that this boilerplate code had to be written in the first place, creating a subtle failure point
### How does `sktime` help avoid the above pitfalls?
`sktime` mitigates the above pitfalls by:
* the unified interface for forecasters - any strategy to produce forecasts is a forecaster. Through the unified interface, forecasters are directly compatible with deployment and evaluation workflows appropriate for forecasters.
* its declarative specification interface that minimizes boilerplate code - it's minimized to the bare necessities to tell `sktime` which forecaster you want to build
Nevertheless, `sktime` aims to be flexible, and tries to avoid to railroad the user into specific methodological choices.
```
from sklearn.neighbors import KNeighborsRegressor
from sktime.forecasting.compose import make_reduction
# declarative forecaster specification - just two lines!
# make_reduction wraps the sklearn regressor so that window construction and
# the recursive forecasting strategy are handled internally by sktime
regressor = KNeighborsRegressor(n_neighbors=1)
forecaster = make_reduction(regressor, window_length=15, strategy="recursive")
forecaster.fit(y_train)
y_pred = forecaster.predict(fh)
```
... and that's it!
Note that there is no `x_train` or other boilerplate artefacts, since construction of the lagged features and other boilerplate code are taken care of by the forecaster internally.
For more details on the `sktime` composition interface, refer to Section 3 of the main forecasting tutorial.
| github_jupyter |
```
import gym
import numpy as np
import math
```
Description:
There are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drives to the passenger's location, picks up the passenger, drives to the passenger's destination (another one of the four specified locations), and then drops off the passenger. Once the passenger is dropped off, the episode ends.
Observations:
There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is in the taxi), and 4 destination locations.
Note that there are 400 states that can actually be reached during an episode. The missing states correspond to situations in which the passenger is at the same location as their destination, as this typically signals the end of an episode.
Four additional states can be observed right after a successful episode, when both the passenger and the taxi are at the destination.
This gives a total of 404 reachable discrete states.
Passenger locations:
- 0: R(ed)
- 1: G(reen)
- 2: Y(ellow)
- 3: B(lue)
- 4: in taxi
Destinations:
- 0: R(ed)
- 1: G(reen)
- 2: Y(ellow)
- 3: B(lue)
Actions:
There are 6 discrete deterministic actions:
- 0: move south
- 1: move north
- 2: move east
- 3: move west
- 4: pickup passenger
- 5: drop off passenger
Rewards:
There is a default per-step reward of -1,
except for delivering the passenger, which is +20,
or executing "pickup" and "drop-off" actions illegally, which is -10.
Rendering:
- blue: passenger
- magenta: destination
- yellow: empty taxi
- green: full taxi
- other letters (R, G, Y and B): locations for passengers and destinations
```
env = gym.make("Taxi-v3")

# Q-table: one row per discrete state, one column per action, initialised to 0.
q_table = np.zeros([env.observation_space.n, env.action_space.n])
env.render()
env.reset()

"""Training the agent"""

import random
from IPython.display import clear_output

# Hyperparameters
alpha = 0.1    # learning rate
gamma = 0.6    # discount factor
epsilon = 0.1  # exploration probability (epsilon-greedy)

# For plotting metrics
all_epochs = []
all_penalties = []

for i in range(1, 100001):
    state = env.reset()

    epochs, penalties, reward = 0, 0, 0
    done = False

    while not done:
        # epsilon-greedy action selection
        if random.uniform(0, 1) < epsilon:
            action = env.action_space.sample()  # Explore action space
        else:
            action = np.argmax(q_table[state])  # Exploit learned values

        next_state, reward, done, info = env.step(action)

        # Q-learning update: blend the old estimate with the bootstrapped
        # target reward + gamma * max_a Q(next_state, a).
        old_value = q_table[state, action]
        next_max = np.max(q_table[next_state])
        new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
        q_table[state, action] = new_value

        if reward == -10:  # illegal pickup/drop-off action
            penalties += 1

        state = next_state
        epochs += 1

    if i % 100 == 0:
        clear_output(wait=True)
        print(f"Episode: {i}")

print("Training finished.\n")

q_table[328]

"""Evaluate agent's performance after Q-learning"""

total_epochs, total_penalties = 0, 0
episodes = 5

for _ in range(episodes):
    state = env.reset()
    epochs, penalties, reward = 0, 0, 0

    done = False

    while not done:
        # act greedily with respect to the learned Q-values
        action = np.argmax(q_table[state])
        state, reward, done, info = env.step(action)
        env.render()

        if reward == -10:
            penalties += 1

        epochs += 1

    total_penalties += penalties
    total_epochs += epochs

print(f"Results after {episodes} episodes:")
print(f"Average timesteps per episode: {total_epochs / episodes}")
print(f"Average penalties per episode: {total_penalties / episodes}")
```
| github_jupyter |
<h1><center>DBSCAN: A macroscopic investigation in Python</center></h1><br>
Cluster analysis is an important problem in data analysis. Data scientists use clustering to identify malfunctioning servers, group genes with similar expression patterns, or various other applications.
Briefly, clustering is the task of grouping together a set of objects in a way that objects in the same cluster are more similar to each other than to objects in other clusters. Similarity is an amount that reflects the strength of a relationship between two data objects. Clustering is mainly used for exploratory data mining. Clustering has manifold usage in many fields such as machine learning, pattern recognition, image analysis, information retrieval, bio-informatics, data compression, and computer graphics.
There are many families of clustering techniques, and you may be familiar with the most popular one: K-Means (which belongs to the *family of centroid-based clustering*). As a quick refresher, K-Means determines k centroids in the data and clusters points by assigning them to the nearest centroid.
While K-Means is easy to understand and implement in practice, the algorithm does not take care of outliers, so all points are assigned to a cluster even if they do not belong in any. In the domain of anomaly detection, this causes problems as anomalous points will be assigned to the same cluster as “normal” data points. The anomalous points pull the cluster centroid towards them, making it harder to classify them as anomalous points.
This tutorial will cover another type of clustering technique known as density-based clustering, specifically DBSCAN (a density-based clustering technique). Compared to centroid-based clustering like K-Means, density-based clustering works by identifying “dense” clusters of points, allowing it to learn clusters of arbitrary shape and identify outliers in the data.
<h2>In this post you will get to know about:</h2>
* Disadvantage of centroid-based clustering technique
* General introduction to density-based clustering technique
* Inner workings of DBSCAN
* A simple case study of DBSCAN in Python
* Applications of DBSCAN
<h3>Disadvantage of centroid-based clustering technique: </h3>
Before discussing the disadvantage of centroid-based clustering, let me give a brief introduction to it. A centroid is a data point (imaginary or real) at the center of a cluster. In centroid-based clustering, clusters are represented by a central vector or a centroid. This centroid might not necessarily be a member of the dataset. Centroid-based clustering is an iterative clustering algorithm in which the notion of similarity is derived by how close a data point is to the centroid of the cluster. <br><br>
Sometimes a dataset can contain extreme values that are outside the range of what is expected and unlike the other data. These are called outliers. More formally, an outlier is an observation that lies an abnormal distance from other values in a random sample from a population.
The main fundamental of centroid-based clustering techniques is driven by distance measurements between the data points and centroids. Therefore, centroid-based clustering techniques generally fail to identify the data points that deviate from the normal distribution of the data to a great extent. Even before predictive models are prepared on data, outliers can result in misleading representations and in turn misleading interpretations of collected data. This is essentially not desirable for building efficient predictive and analytical models from data. <br><br>
You can consider the following two taller bars (than rest of the bars) as outliers in that particular data:
<center>

</center>
<h3>General introduction to density-based clustering technique:</h3>
Before discussing density-based clustering, you first need to cover a topic : ɛ-neighborhoods.
The general idea behind ɛ-neighborhoods is given a data point, you want to be able to reason about the data points in the space around it. Formally, for some real-valued ɛ > 0 and some point p, the ɛ-neighborhood of p is defined as the set of points that are at most distance ɛ away from p.
If you think back to geometry, the shape in which all points are equidistant from the center is the circle. In 2D space, the ɛ-neighborhood of a point p is the set of points contained in a circle of radius ɛ, centered at p. In 3D space, the ɛ-neighborhood is a sphere of radius ɛ, centered at p, and in higher dimensional space, the ɛ-neighborhood is just the [N-sphere](https://en.wikipedia.org/wiki/N-sphere) of radius ɛ, centered at p.
Let’s consider an example to make this idea more concrete.
In the image below 100 data points are scattered in the interval [1,3]X[2,4]. Let’s pick the point (3,2) to be our point p.
<center>

</center>
First, let’s consider the neighborhood of p with radius 0.5 (ɛ = 0.5), the set of points that are distance 0.5 away from p.
<center>

</center>
The opaque green oval represents our neighborhood, and there are 31 data points in this neighborhood. Since 100 data points were scattered and 31 are in the neighborhood, this means that a little under one-third of the data points are contained within the neighborhood of p with radius 0.5.
Now, let’s change our radius to 0.15 (ɛ = 0.15) and consider the resulting smaller neighborhood.
<center>

</center>
Now the neighborhood is shrunk a bit, so now only 3 data points are contained within it. By decreasing ɛ from 0.5 to 0.15 (a 70% reduction), the number of points is decreased in our neighborhood from 31 to 3 (a 90% reduction).
Now that you have a fair understanding of “neighborhood”, I will introduce the next important concept: the notion of a “density” for a neighborhood (You are proceeding towards learning “density-based clustering", after all).
In a grade-school science class, children are taught that density = mass/volume. Let’s use this idea of mass divided by volume to define density at some point p. If you consider some point p and its neighborhood of radius ɛ, you can define the mass of the neighborhood as the number of data points (or alternatively, the fraction of data points) contained within the neighborhood, and the volume of the neighborhood is volume of the resulting shape of the neighborhood. In the 2D case, the neighborhood is a circle, so the volume of the neighborhood is just the area of the resulting circle. In the 3D and higher dimensional case, the neighborhood is a sphere or n-sphere, so you can calculate the volume of this shape.
For example, let’s consider our neighborhood of p = (3,2) of radius 0.5 again.
<center>

</center>
The mass is the number of data points in the neighborhood, so mass = 31. The volume is the area of the circle, so volume = π0.5<sup>2</sup> = π/4. Therefore, our local density approximation at * p = (3,2) is calculated as density = mass/volume = 31/(π/4) = 124/π ~= 39.5.
This value is meaningless by itself, but if you calculate the local density approximation for all points in our dataset, you could cluster our points by saying that points that are nearby (contained in the same neighborhood) and have similar local density approximations belong in the same cluster. If you decrease the value of ɛ, you can construct smaller neighborhoods (less volume) that would also contain fewer data points. Ideally, you want to identify highly dense neighborhoods where most of the data points are contained in these neighborhoods, but the volume of each of these neighborhoods is relatively small.<br>
While this is not exactly what either DBSCAN or the Level Set Tree algorithm (another clustering technique belonging to the family of density-based clustering) does, it forms the general intuition behind density-based clustering.<br>
To recap, you covered the ɛ-neighborhoods and how they allow to reason about the space around a particular point. Then you learnt a notion of density at a particular point for a particular neighborhood. In the next section, you will get to know the DBSCAN algorithm where the ɛ-ball is a fundamental tool for defining clusters.
<h3>Inner workings of DBSCAN:</h3>
DBSCAN stands for Density-Based Spatial Clustering of Applications with Noise and it is hands down the most well-known density-based clustering algorithm. It was first introduced in 1996 by [Ester et. al](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1980). Due to its importance in both theory and applications, this algorithm is one of three algorithms awarded the Test of Time Award at SIGKDD 2014.
Unlike K-Means, DBSCAN does not require the number of clusters as a parameter. Rather it infers the number of clusters based on the data, and it can discover clusters of arbitrary shape (for comparison, K-Means usually discovers spherical clusters). As you saw earlier, the ɛ-neighborhood is fundamental to DBSCAN to approximate local density, so the algorithm has two parameters:
* ɛ: The radius of our neighborhoods around a data point p.
* minPts: The minimum number of data points you want in a neighborhood to define a cluster.
Using these two parameters, DBSCAN categorizes the data points into three categories:
* Core Points: A data point p is a core point if Nbhd(p,ɛ) [ɛ-neighborhood of p] contains at least minPts ; |Nbhd(p,ɛ)| >= minPts.
* Border Points: A data point *q is a border point if Nbhd(q, ɛ) contains less than minPts data points, but q is reachable from some core point p.
* Outlier: A data point o is an outlier if it is neither a core point nor a border point. Essentially, this is the “other” class.
These definitions may seem abstract, so let’s cover what each one means in more detail.
<b>Core Points: </b><br>
Core points are the foundations of our clusters; they are based on the density approximation I discussed in the previous section. You use the same ɛ to compute the neighborhood for each point, so the volume of all the neighborhoods is the same. However, the number of other points in each neighborhood is what differs. Recall that I said you can think of the number of data points in the neighborhood as its mass. The volume of each neighborhood is constant, and the mass of each neighborhood is variable, so by putting a threshold on the minimum amount of mass needed to be a core point, you are essentially setting a minimum density threshold. Therefore, core points are data points that satisfy a minimum density requirement. Our clusters are built around our core points (hence the core part), so by adjusting our minPts parameter, you can fine-tune how dense our cluster cores must be.
<b>Border Points:</b><br>
Border Points are the points in our clusters that are not core points. In the definition above for border points, I used the term density-reachable. I have not defined this term yet, but the concept is simple. To explain this concept, let’s revisit our neighborhood example with epsilon = 0.15. Consider the point r (the black dot) that is outside of the point p‘s neighborhood.
<center>

</center>
All the points inside the point p‘s neighborhood are said to be directly reachable from p. Now, let’s explore the neighborhood of point q, a point directly reachable from p. The yellow circle represents q‘s neighborhood.
<center>

</center>
Now while your target point r is not your starting point p‘s neighborhood, it is contained in the point q‘s neighborhood. This is the idea behind density-reachable: If you can get to the point r by jumping from neighborhood to neighborhood, starting at a point p, then the point r is density-reachable from the point p.
<center>

</center>
As an analogy, you can think of density-reachable points as being the “friends of a friend”. If the directly-reachable of a core point p are its “friends”, then the density-reachable points, points in neighborhood of the “friends” of p, are the “friends of its friends”. One thing that may not be clear is density-reachable points is not limited to just two adjacent neighborhood jumps. As long as you can reach the point doing “neighborhood jumps”, starting at a core point p, that point is density-reachable from p, so “friends of a friend of a friend … of a friend” are included as well. <br>
It is important to keep in mind that this idea of density-reachable is dependent on our value of ɛ. By picking larger values of ɛ, more points become density-reachable, and by choosing smaller values of ɛ, fewer points become density-reachable.
<b>Outliers:</b><br>
Finally, you get to the “other” class. Outliers are points that are neither core points nor are they close enough to a cluster to be density-reachable from a core point. Outliers are not assigned to any cluster and, depending on the context, may be considered anomalous points.
<h3>Case study of DBSCAN in Python:</h3><br>
DBSCAN is already beautifully implemented in the popular Python machine learning library *Scikit-Learn*, and because this implementation is scalable and well-tested, you will be using it to see how DBSCAN works in practice.
The steps to the DBSCAN algorithm are:
* Pick a point at random that has not been assigned to a cluster or been designated as an outlier. Compute its neighborhood to determine if it’s a core point. If yes, start a cluster around this point. If no, label the point as an outlier.
* Once we find a core point and thus a cluster, expand the cluster by adding all directly-reachable points to the cluster. Perform “neighborhood jumps” to find all density-reachable points and add them to the cluster. If an outlier is added, change that point’s status from outlier to border point.
* Repeat these two steps until all points are either assigned to a cluster or designated as an outlier.
For this case study purpose you will be using [a dataset consisting of annual customer data for a wholesale distributor](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers).
So, let's get started.
```
# Let's import all your dependencies first
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
The dataset consists of 440 customers and has 8 attributes for each of these customers. You will use the Pandas library to import the .csv file and convert it into a DataFrame object.
Now, while importing your .csv file into the DataFrame, make sure you supply the accurate path of that file.
```
# Import .csv file and convert it to a DataFrame object
# NOTE(review): the path is machine-specific; adjust it to your environment.
df = pd.read_csv("C:/Users/Sayak/data/customers.csv")
print(df.head())
```
Now before proceeding any further with applying DBSCAN, it is very important that you know the data well so as to know what kind of data is in the dataset, what distribution the data follows, and which features are numerical or not.
According to the description given in the official [UCI machine learning repository of this dataset](https://archive.ics.uci.edu/ml/datasets/wholesale+customers), information about the features of the dataset is as follows:
<li>
FRESH: annual spending (m.u.) on fresh products (Continuous); </li>
<li>MILK: annual spending (m.u.) on milk products (Continuous); </li>
<li>GROCERY: annual spending (m.u.)on grocery products (Continuous); </li>
<li>FROZEN: annual spending (m.u.)on frozen products (Continuous) </li>
<li>DETERGENTS_PAPER: annual spending (m.u.) on detergents and paper products (Continuous) </li>
<li>DELICATESSEN: annual spending (m.u.)on and delicatessen products (Continuous); </li>
<li>CHANNEL: customers’ Channel - Horeca (Hotel/Restaurant/Café) or Retail channel (Nominal)
REGION
</li>
Now that you know about the features about the dataset, let's display some stats of the data.
```
# Column dtypes and non-null counts -- used below to confirm no missing values
print(df.info())
```
As you can see from the above output, there is no missing value in the dataset and all the data is *integer* in type. This reduces the burden of further preprocessing the data. Let's dig a bit more.
```
# Summary statistics (mean, std, min/max, quartiles) per numeric column
print(df.describe())
```
From the above output, you can derive all the necessary statistical measures like standard deviation, mean, max of each and every feature present in the dataset. You can see most of the data in this dataset is *[continuous](https://stats.stackexchange.com/questions/206/what-is-the-difference-between-discrete-data-and-continuous-data)* in nature except for two features: Channel and Region. So for easing your computations, you will drop these two:
```
# Channel and Region are nominal; drop them so only continuous spend
# features remain for clustering
df.drop(["Channel", "Region"], axis = 1, inplace = True)
# Let's get a view of the data after the drop
print(df.head())
```
So you can visualize the data, for that you are going to use two of the features:
* Groceries: The customer’s annual spending (in some monetary unit) on grocery products.
* Milk: The customer’s annual spending (in some monetary unit) on milk products.
```
# Let's plot the data now
# Scatter Grocery vs. Milk spending to eyeball the main cluster and outliers
x = df['Grocery']
y = df['Milk']
plt.scatter(x,y)
plt.xlabel("Groceries")
plt.ylabel("Milk")
plt.show()
```
Let's brief about the functions that you used for the plotting purpose:
plt.scatter() : This function actually creates the scatter plot based on the data (as parameters that you supply [*x* and *y*]).
plt.xlabel() : It helps you to put a label along the *X-axis*. (*Groceries* in this case)
plt.ylabel() : It helps you to put a label along the *Y-axis*. (*Milk* in this case)
plt.show() : After the plot is created, this function helps you to display it as the output.
You should really explore the beautiful world of *Matplotlib* for all your visualization purposes. Its [documentation](https://matplotlib.org/) is absolutely awesome.
You can easily spot the data points that are far astray. Right? Well, those are your outliers.
With DBSCAN, we want to identify this main cluster of customers, but we also want to flag customers with more unusual annual purchasing habits as outliers.
Because the values of the data are in the thousands, you are going to normalize each attribute by scaling it to 0 mean and unit variance. Basically, this helps to keep the inter-relationships between the features intact, so that a small change in one feature would be reflected in the other.
```
# Restrict to the two features of interest and convert to a float32 array.
# DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy() instead.
df = df[["Grocery", "Milk"]]
df = df.to_numpy(dtype="float32")

# Standardize to zero mean / unit variance so both features weigh equally in
# the Euclidean distances DBSCAN computes.
stscaler = StandardScaler().fit(df)
df = stscaler.transform(df)
```
You will construct a DBSCAN object that requires a minimum of 15 data points in a neighborhood of radius 0.5 to be considered a core point.
```
# eps = neighborhood radius, min_samples = minPts (core-point threshold)
dbsc = DBSCAN(eps = .5, min_samples = 15).fit(df)
```
Next, we can extract our cluster labels and outliers to plot our results.
```
# Cluster label per sample; DBSCAN marks outliers with label -1
labels = dbsc.labels_
# Boolean mask that is True exactly at the core-point indices
core_samples = np.zeros_like(labels, dtype = bool)
core_samples[dbsc.core_sample_indices_] = True
```

Lining up with the intuition, the DBSCAN algorithm was able to identify one cluster of customers who are around the mean grocery and mean milk product purchases. In addition, it was able to flag customers whose annual purchasing behavior deviated too heavily from other customers.
Because the outliers corresponded to customers with more extreme purchasing behavior, the wholesale distributor could specifically target these customers with exclusive discounts to encourage larger purchases.
<h3>Real life applications of DBSCAN:</h3>
* Suppose we have an e-commerce and we want to improve our sales by recommending relevant products to our customers. We don’t know exactly what our customers are looking for but based on a data set we can predict and recommend a relevant product to a specific customer. We can apply the DBSCAN to our data set (based on the e-commerce database) and find clusters based on the products that the users have bought. Using this clusters we can find similarities between customers, for example, if customer A has bought a pen, a book and one pair scissors, while customer B purchased a book and one pair of scissors, then you could recommend a pen to customer B.
* Before the rise of deep learning based advanced methodologies, researchers used DBSCAN in order to segregate genes from a genes dataset that had the chance of mediating cancer.
* Scientists have used DBSCAN in order to detect the stops in the trajectory data generated from mobile GPS devices. Stops represent the most meaningful and most important part of a trajectory.
<h3>Conclusion:</h3>
So, in this blogpost you got to know about the prime disadvantages of centroid-based clustering and got familiar with another family of clustering techniques i.e. density-based clustering. You also saw how they overcome the shortcomings of centroid-based clustering.
You learnt how DBSCAN works and also did a case study of it. Besides, you got a fair overview of the real life problems where DBSCAN has been incorporated for solving them. As a further reading, I would really recommend you all go through the other density-based clustering methods like *Level Set Tree clustering* and how it is different from DBSCAN.
<h4>References:</h4>
* Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. 1996. A density-based algorithm for discovering clusters a density-based algorithm for discovering clusters in large spatial databases with noise. In Proceedings of the Second International Conference on Knowledge Discovery and Data Mining (KDD'96), Evangelos Simoudis, Jiawei Han, and Usama Fayyad (Eds.). AAAI Press 226-231.
* https://towardsdatascience.com/how-dbscan-works-and-why-should-i-use-it-443b4a191c80
* https://www.coursera.org/learn/predictive-analytics/lecture/EVHfy/dbscan
| github_jupyter |
```
import os, sys
sys.path.append(os.path.abspath('../..'))
import matplotlib.pyplot as plt
import floris.tools as wfct
# Initialize the FLORIS interface fi
# For basic usage, the FLORIS interface provides a simplified interface to
# the underlying classes
fi = wfct.floris_interface.FlorisInterface("../../examples/example_input.json")
# Calculate wake
fi.calculate_wake()
# Get horizontal plane at default height (hub-height)
hor_plane = fi.get_hor_plane()
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
plt.show()
# Initialize the FLORIS interface fi
fi = wfct.floris_interface.FlorisInterface("../../examples/example_input.json")
# Set to 2x2 farm
# (layout_array is [x_coordinates, y_coordinates] for the four turbines)
fi.reinitialize_flow_field(layout_array=[[0, 0, 600, 600], [0, 300, 0, 300]])
# Change turbine 0 and 3 to have a 35 m rotor diameter
fi.change_turbine([0, 3], {"rotor_diameter": 35})
# Calculate wake
fi.calculate_wake()
# Get horizontal plane at default height (hub-height)
hor_plane = fi.get_hor_plane()
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
plt.show()
fi = wfct.floris_interface.FlorisInterface("../../examples/example_input.json")


# Declare a short-cut visualization function for brevity in this example
def plot_slice_shortcut(fi, ax, title):
    """Plot a hub-height horizontal cut plane of *fi* on *ax* with *title*."""
    # Get horizontal plane at default height (hub-height)
    hor_plane = fi.get_hor_plane()
    ax.set_title(title)
    wfct.visualization.visualize_cut_plane(hor_plane, ax=ax, minSpeed=4.0, maxSpeed=8.0)


# Define a plot: one panel per changed flow-field parameter
fig, axarr = plt.subplots(3, 3, sharex=True, figsize=(12, 5), dpi=120)
axarr = axarr.flatten()

# Plot the initial setup
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[0], "Initial")

# Change the wind speed
fi.reinitialize_flow_field(wind_speed=10.0)
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[1], "WS=10")

# Change the wind direction
fi.reinitialize_flow_field(wind_direction=320.0)
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[2], "WD=320")

# Change the TI
fi.reinitialize_flow_field(turbulence_intensity=0.15)
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[3], "TI=15%")

# Change the shear
fi.reinitialize_flow_field(wind_shear=0.2)
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[4], "Shear=0.2")

# Change the veer
fi.reinitialize_flow_field(wind_veer=5)  # TODO IS THIS RIGHT?
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[5], "Veer=5")

# Change the air density
fi.reinitialize_flow_field(air_density=1.0)  # TODO IS THIS RIGHT?
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[6], "Air Density=1.0")

# Change the farm layout
fi.reinitialize_flow_field(layout_array=[[0, 500, 1000], [0, 0, 0]])  # TODO IS THIS RIGHT?
fi.calculate_wake()
plot_slice_shortcut(fi, axarr[7], "Change layout")
wfct.visualization.plot_turbines_with_fi(axarr[7], fi)

# Changes the yaw angles
fi.calculate_wake(yaw_angles=[25, 25, 25])
plot_slice_shortcut(fi, axarr[8], "Change yaw angles")
wfct.visualization.plot_turbines_with_fi(axarr[8], fi)

plt.show()
# Initialize the FLORIS interface fi
fi = wfct.floris_interface.FlorisInterface("../../examples/example_input.json")
# Show the current model parameters
print("All the model parameters and their current values:\n")
fi.show_model_parameters()
print("\n")
# Show the current model parameters with docstring info
print("All the model parameters, their current values, and docstrings:\n")
fi.show_model_parameters(verbose=True)
print("\n")
# Show a specific model parameter with its docstring
print("A specific model parameter, its current value, and its docstring:\n")
fi.show_model_parameters(params=["ka"], verbose=False)
print("\n")
# Get the current model parameters
model_params = fi.get_model_parameters()
print("The current model parameters:\n")
print(model_params)
print("\n")
# Set parameters on the current model
# (nested dict keyed by parameter group, then parameter name)
print("Set specific model parameters on the current wake model:\n")
params = {
"Wake Velocity Parameters": {"alpha": 0.2},
"Wake Deflection Parameters": {"alpha": 0.2},
"Wake Turbulence Parameters": {"ti_constant": 1.0},
}
fi.set_model_parameters(params)
print("\n")
# Check that the parameters were changed
print("Observe that the requested paremeters changes have been made:\n")
model_params = fi.get_model_parameters()
print(model_params)
print("\n")
# Initialize the FLORIS interface for 5 separate models defined as JSONs
fi_jensen = wfct.floris_interface.FlorisInterface("../../examples/other_jsons/jensen.json")
fi_turbopark = wfct.floris_interface.FlorisInterface("../../examples/other_jsons/turbopark.json")
fi_mz = wfct.floris_interface.FlorisInterface("../../examples/other_jsons/multizone.json")
fi_gauss = wfct.floris_interface.FlorisInterface("../../examples/other_jsons/input_legacy.json")
fi_gch = wfct.floris_interface.FlorisInterface("../../examples/example_input.json")

fig, axarr = plt.subplots(2, 5, figsize=(16, 4))

# Use a python for loop to iterate over the models and plot a horizontal cut through
# of the models for an aligned and yaw case to show some differences
for idx, (fi, name) in enumerate(
    zip(
        [fi_jensen, fi_turbopark, fi_mz, fi_gauss, fi_gch],
        ["Jensen", "TurbOPark", "Multizone", "Gaussian", "GCH"],
    )
):
    # Aligned case
    fi.calculate_wake(yaw_angles=[0])
    ax = axarr[0, idx]
    hor_plane = fi.get_hor_plane()
    wfct.visualization.visualize_cut_plane(hor_plane, ax=ax, minSpeed=4, maxSpeed=8)
    ax.set_title(name)
    axarr[0, 0].set_ylabel("Aligned")

    # Yawed case
    fi.calculate_wake(yaw_angles=[25])
    ax = axarr[1, idx]
    hor_plane = fi.get_hor_plane()
    wfct.visualization.visualize_cut_plane(hor_plane, ax=ax, minSpeed=4, maxSpeed=8)
    axarr[1, 0].set_ylabel("Yawed")

# Show the figure
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import Image as im
%matplotlib inline
# Load the consolidated StockX resale dataset and derive a coarse time axis.
data = pd.read_csv("../data/StockX-Data-Consolidated.csv")
data['week_since_release'] = (data['Days Since Release']/7).round(1)
data.columns[21:32]
## Get brands and regions
# The CSV one-hot encodes brand / region / color across fixed column ranges;
# these helpers recover the single active label per row.
# NOTE(review): the column index ranges [4:14], [14:20], [21:32] are assumed
# to match the CSV layout — verify against the file header.
def get_brand(row):
    """Return the brand column (cols 4-13) whose one-hot flag is 1, else None."""
    for brand in data.columns[4:14]:
        if row[brand] == 1:
            return brand
def get_region(row):
    """Return the region column (cols 14-19) whose one-hot flag is 1, else None."""
    for region in data.columns[14:20]:
        if row[region] == 1:
            return region
def get_col(row):
    """Return the color column (cols 21-31) whose one-hot flag is 1, else None."""
    for color in data.columns[21:32]:
        if row[color] == 1:
            return color
# Materialize the decoded categorical labels as new columns (row-wise apply).
data['brand'] = data.apply(get_brand, axis=1)
data['region'] = data.apply(get_region, axis=1)
data['color'] = data.apply(get_col, axis=1)
timing = data[['Days Since Release',"week_since_release",'region', "brand",'color','Pct_change']]
timing = timing.rename(columns = {'Days Since Release':"days_since_release"})
# Scatter of price premium vs weeks since release, randomly colored.
# NOTE(review): N is hard-coded and must equal len(timing) for the color
# array to match the number of points — confirm against the dataset size.
np.random.seed(19680801)
N = 99956
colors = np.random.rand(N)
area = (50 * np.random.rand(N))**2
plt.scatter(x = timing['week_since_release'], y = timing['Pct_change'], c=colors, alpha=0.5)
plt.title('Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()
# NOTE(review): this in-place dedupe (one row per week/region pair) mutates
# `timing` permanently, so every later brand/color plot in this notebook runs
# on the deduplicated frame — confirm that is intended. It may also raise a
# SettingWithCopyWarning since `timing` derives from a slice of `data`.
timing.drop_duplicates(["week_since_release",'region'], inplace=True)
pivot = timing.pivot(index='region', columns='week_since_release', values='Pct_change',)
ax = sns.heatmap(pivot,annot=True,cmap = 'YlGnBu')
plt.show()
# Same heatmap via pivot_table (aggregates duplicates instead of raising).
df1 = timing[["week_since_release",'region','Pct_change']]
heatmap1_data = pd.pivot_table(df1,values='Pct_change', index=['region'], columns='week_since_release')
heatmap1_data.head(n=5)
sns.heatmap(heatmap1_data, cmap="BuGn")
# Scatter variants colored by price premium, per region and per brand.
fig, ax = plt.subplots()
sc = ax.scatter(timing.region,timing.week_since_release, c=timing.Pct_change, cmap="YlGnBu")
fig.colorbar(sc, ax=ax)
plt.show()
fig, ax = plt.subplots()
sc = ax.scatter(timing.brand,timing.week_since_release, c=timing.Pct_change, cmap="YlGnBu")
fig.colorbar(sc, ax=ax)
# NOTE(review): this creates a new (empty) figure after the scatter; the
# figsize does not apply to the scatter above.
plt.figure(figsize=(20, 60))
plt.show()
```
## Nike off-white days/weeks since release
```
# Split the non-Yeezy ("Off-White") rows out of `timing` and group by colorway.
offwhite= timing.loc[timing['brand'] != 'yeezy']
ow_nowhite = offwhite.loc[offwhite['color'] != 'White']
ow_white = offwhite.loc[offwhite['color'] == 'White']
ow_color = ow_nowhite.groupby(['color'])
# Background image reused by all Nike plots below.
img = plt.imread('../data/media/nike.jpg')
# Plot
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-80, 800, 0, 8), zorder=-1,alpha = 0.5)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# Manual palette; entries are matched positionally to groupby iteration order
# (alphabetical color names). NOTE(review): verify the palette order still
# matches if the set of colorways changes.
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap2[-1],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(ow_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.2)
# White axis/label styling so text reads over the dark background image.
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
#ax.patch.set_visible(False)
plt.title('Nike: Off-White', fontsize = 'large', color = 'white' )
plt.xlabel('Days Since Release', color = 'white' )
plt.ylabel('Price Premium', color = 'white')
plt.legend()
plt.show()
offwhite['brand'].value_counts(sort=True, ascending=False, bins=None, dropna=True)
## Build per-model subsets (Air Jordan, Presto, Zoom, Blazer, Air Force)
aj = offwhite.loc[offwhite['brand'] == 'airjordan']
aj_color = aj.groupby(['color'])
presto = offwhite.loc[offwhite['brand'] == 'presto']
presto_color = presto.groupby(['color'])
zoom = offwhite.loc[offwhite['brand'] == 'zoom']
zoom_color = zoom.groupby(['color'])
blazer = offwhite.loc[offwhite['brand'] == 'blazer']
blazer_color = blazer.groupby(['color'])
# AJ Plot
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, -2, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[0],cmap2[-1],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(aj_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.4)
plt.title('Nike: Off-White Air Jordan', fontsize = 'large', color = 'white')
plt.xlabel('Days Since Release', color = 'white')
plt.ylabel('Price Premium', color = 'white')
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
plt.legend()
plt.show()
# Zoom Plot
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, -2, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[7],cmap1[4],cmap1[0]]
for i, (name, group) in enumerate(zoom_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Nike: Off-White Zoom', fontsize = 'large', color = 'white')
plt.xlabel('Days Since Release', color = 'white')
plt.ylabel('Price Premium', color = 'white')
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
plt.legend()
plt.show()
# Presto Plot
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, 0, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[0],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(presto_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Nike: Off-White Presto', fontsize = 'large', color = 'white')
plt.xlabel('Days Since Release', color = 'white')
plt.ylabel('Price Premium', color = 'white')
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
plt.legend()
plt.show()
# Blazer Plot
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, 0, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[0],cmap1[7],cmap1[0]]
for i, (name, group) in enumerate(blazer_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Nike: Off-White Blazer', fontsize = 'large', color = 'white')
plt.xlabel('Days Since Release', color = 'white')
plt.ylabel('Price Premium', color = 'white')
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
plt.legend()
plt.show()
aj = offwhite.loc[offwhite['brand'] == 'airjordan']
aj_color = aj.groupby(['color'])
# Presto Plot
# NOTE(review): this block duplicates the Presto plot above (same data,
# different alpha and default-colored title/labels) — possibly an abandoned
# variant left in the notebook.
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, 0, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[0],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(presto_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.2)
plt.title('Nike: Off-White Presto', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
ax.spines['bottom'].set_color('white')
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.spines['left'].set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
plt.legend()
plt.show()
aj.shape
# Scatter of weekly price premium for Air Jordan; N must equal len(aj)
# for the random color array to line up with the points.
np.random.seed(19680801)
N = 5703
colors = np.random.rand(N)
area = (50 * np.random.rand(N))**2
plt.scatter(x = aj['week_since_release'], y = aj['Pct_change'], c=colors, alpha=0.5)
plt.title('AJ: Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()
# Same scatter for the Blazer subset (N hard-coded to len(blazer)).
np.random.seed(19680801)
N = 3622
colors = np.random.rand(N)
area = (50 * np.random.rand(N))**2
plt.scatter(x = blazer['week_since_release'], y = blazer['Pct_change'], c=colors, alpha=0.5)
plt.title('Blazer: Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()
```
## Yeezy days/weeks since release
```
# Yeezy subset: same days/weeks-since-release views as the Off-White section.
timing.brand.unique()
yeezy= timing.loc[timing['brand'] == 'yeezy']
img2 = plt.imread('../data/media/yeezy.jpg')
yeezy.color.unique()
yeezy_color = yeezy.groupby(['color'])
# Plot
fig, ax = plt.subplots()
ax.imshow(img2, aspect='auto', extent=(-5, 1500, -2, 12), zorder=-1,alpha = 0.5)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# Palette entries matched positionally to the groupby iteration order.
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap2[-1],cmap1[-1],cmap1[4],cmap1[0]]
for i, (name, group) in enumerate(yeezy_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Adidas: Yeezy', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
plt.legend()
plt.show()
yeezy.shape
# Weekly scatter; NOTE(review): N is hard-coded and must equal len(yeezy)
# for the color array to match the number of points — verify.
np.random.seed(19680801)
N = 72162
colors = np.random.rand(N)
area = (50 * np.random.rand(N))**2
plt.scatter(x = yeezy['week_since_release'], y = yeezy['Pct_change'], c=colors, alpha=0.5)
plt.title('Yeezy: Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()
```
| github_jupyter |
### <font color = "darkblue">Updates to Assignment</font>
#### If you were working on the older version:
* Please click on the "Coursera" icon in the top right to open up the folder directory.
* Navigate to the folder: Week 3/ Planar data classification with one hidden layer. You can see your prior work in version 6b: "Planar data classification with one hidden layer v6b.ipynb"
#### List of bug fixes and enhancements
* Clarifies that the classifier will learn to classify regions as either red or blue.
* compute_cost function fixes np.squeeze by casting it as a float.
* compute_cost instructions clarify the purpose of np.squeeze.
* compute_cost clarifies that "parameters" parameter is not needed, but is kept in the function definition until the auto-grader is also updated.
* nn_model removes extraction of parameter values, as the entire parameter dictionary is passed to the invoked functions.
# Planar data classification with one hidden layer
Welcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression.
**You will learn how to:**
- Implement a 2-class classification neural network with a single hidden layer
- Use units with a non-linear activation function, such as tanh
- Compute the cross entropy loss
- Implement forward and backward propagation
## 1 - Packages ##
Let's first import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.
- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis.
- [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.
- testCases provides some test examples to assess the correctness of your functions
- planar_utils provide various useful functions used in this assignment
```
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases_v2 import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
```
## 2 - Dataset ##
First, let's get the dataset you will work on. The following code will load a "flower" 2-class dataset into variables `X` and `Y`.
```
# Load the 2-class "flower" dataset: X holds 2-D point coordinates,
# Y holds the 0/1 class labels (shapes shown in the next cell).
X, Y = load_planar_dataset()
```
Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data. In other words, we want the classifier to define regions as either red or blue.
```
# Visualize the data:
# Scatter the points colored by class label.
# NOTE(review): newer matplotlib versions require a 1-D color array;
# `c=Y` may need `np.squeeze(Y)` outside the course environment — verify.
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
plt.figure(figsize=(35,30))
# Renders the first feature row of X as a 1-pixel-tall image strip.
plt.imshow(X[:1])
```
You have:
- a numpy-array (matrix) X that contains your features (x1, x2)
- a numpy-array (vector) Y that contains your labels (red:0, blue:1).
Let's first get a better sense of what our data is like.
**Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`?
**Hint**: How do you get the shape of a numpy array? [(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)
```
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X.shape  # (n_x, m): features by examples
shape_Y = Y.shape  # (1, m)
m = X.shape[1] # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
```
**Expected Output**:
<table style="width:20%">
<tr>
<td>**shape of X**</td>
<td> (2, 400) </td>
</tr>
<tr>
<td>**shape of Y**</td>
<td>(1, 400) </td>
</tr>
<tr>
<td>**m**</td>
<td> 400 </td>
</tr>
</table>
## 3 - Simple Logistic Regression
Before building a full neural network, let's first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.
```
# Train the logistic regression classifier
# sklearn expects samples in rows, hence the transposes of X and Y.
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
```
You can now plot the decision boundary of these models. Run the code below.
```
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
# Accuracy is the fraction of agreeing labels, computed via dot products:
# Y·pred counts true positives, (1-Y)·(1-pred) counts true negatives.
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
```
**Expected Output**:
<table style="width:20%">
<tr>
<td>**Accuracy**</td>
<td> 47% </td>
</tr>
</table>
**Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now!
## 4 - Neural Network model
Logistic regression did not work well on the "flower dataset". You are going to train a Neural Network with a single hidden layer.
**Here is our model**:
<img src="images/classification_kiank.png" style="width:600px;height:300px;">
**Mathematically**:
For one example $x^{(i)}$:
$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1]}\tag{1}$$
$$a^{[1] (i)} = \tanh(z^{[1] (i)})\tag{2}$$
$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2]}\tag{3}$$
$$\hat{y}^{(i)} = a^{[2] (i)} = \sigma(z^{ [2] (i)})\tag{4}$$
$$y^{(i)}_{prediction} = \begin{cases} 1 & \mbox{if } a^{[2](i)} > 0.5 \\ 0 & \mbox{otherwise } \end{cases}\tag{5}$$
Given the predictions on all the examples, you can also compute the cost $J$ as follows:
$$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large\left(\small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large \right) \small \tag{6}$$
**Reminder**: The general methodology to build a Neural Network is to:
1. Define the neural network structure ( # of input units, # of hidden units, etc).
2. Initialize the model's parameters
3. Loop:
- Implement forward propagation
- Compute loss
- Implement backward propagation to get the gradients
- Update parameters (gradient descent)
You often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data.
### 4.1 - Defining the neural network structure ####
**Exercise**: Define three variables:
- n_x: the size of the input layer
- n_h: the size of the hidden layer (set this to 4)
- n_y: the size of the output layer
**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.
```
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
    """
    Determine the network layer sizes from the data matrices.

    Arguments:
    X -- input dataset of shape (input size, number of examples)
    Y -- labels of shape (output size, number of examples)

    Returns:
    n_x -- the size of the input layer
    n_h -- the size of the hidden layer (fixed at 4 for this assignment)
    n_y -- the size of the output layer
    """
    ### START CODE HERE ### (≈ 3 lines of code)
    # Input/output widths come straight from the data; the hidden width is
    # hard-coded to 4 per the exercise instructions.
    input_units, output_units = X.shape[0], Y.shape[0]
    hidden_units = 4
    ### END CODE HERE ###
    return (input_units, hidden_units, output_units)
# Smoke-test layer_sizes on the provided test-case shapes and print the result
# for comparison against the expected output table below.
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
```
**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just coded).
<table style="width:20%">
<tr>
<td>**n_x**</td>
<td> 5 </td>
</tr>
<tr>
<td>**n_h**</td>
<td> 4 </td>
</tr>
<tr>
<td>**n_y**</td>
<td> 2 </td>
</tr>
</table>
### 4.2 - Initialize the model's parameters ####
**Exercise**: Implement the function `initialize_parameters()`.
**Instructions**:
- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.
- You will initialize the weights matrices with random values.
- Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).
- You will initialize the bias vectors as zeros.
- Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Randomly initialize the weights and zero the biases of the 2-layer net.

    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing:
                    W1 -- weight matrix of shape (n_h, n_x)
                    b1 -- bias vector of shape (n_h, 1)
                    W2 -- weight matrix of shape (n_y, n_h)
                    b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(2)  # fixed seed so the graded output is reproducible

    ### START CODE HERE ### (≈ 4 lines of code)
    # Small random weights break symmetry between hidden units; biases start
    # at zero. Dict-literal evaluation order preserves the RNG call order
    # (W1 drawn before W2), keeping the seeded values identical.
    parameters = {
        "W1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros((n_h, 1)),
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }
    ### END CODE HERE ###

    # Shape sanity checks before handing the parameters back.
    assert parameters["W1"].shape == (n_h, n_x)
    assert parameters["b1"].shape == (n_h, 1)
    assert parameters["W2"].shape == (n_y, n_h)
    assert parameters["b2"].shape == (n_y, 1)

    return parameters
# Exercise initialize_parameters on the provided test-case sizes and display
# the resulting weights/biases for comparison with the expected output.
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table style="width:90%">
<tr>
<td>**W1**</td>
<td> [[-0.00416758 -0.00056267]
[-0.02136196 0.01640271]
[-0.01793436 -0.00841747]
[ 0.00502881 -0.01245288]] </td>
</tr>
<tr>
<td>**b1**</td>
<td> [[ 0.]
[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td>**W2**</td>
<td> [[-0.01057952 -0.00909008 0.00551454 0.02292208]]</td>
</tr>
<tr>
<td>**b2**</td>
<td> [[ 0.]] </td>
</tr>
</table>
### 4.3 - The Loop ####
**Question**: Implement `forward_propagation()`.
**Instructions**:
- Look above at the mathematical representation of your classifier.
- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.
- You can use the function `np.tanh()`. It is part of the numpy library.
- The steps you have to implement are:
1. Retrieve each parameter from the dictionary "parameters" (which is the output of `initialize_parameters()`) by using `parameters[".."]`.
2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).
- Values needed in the backpropagation are stored in "`cache`". The `cache` will be given as an input to the backpropagation function.
```
# Quick interactive check of the training-data shape before implementing
# the forward pass (expected (2, 400)).
X.shape
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Run one forward pass: linear -> tanh -> linear -> sigmoid.

    Argument:
    X -- input data of size (n_x, m)
    parameters -- python dictionary containing your parameters (output of initialization function)

    Returns:
    A2 -- The sigmoid output of the second activation
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    """
    # Retrieve each parameter from the dictionary "parameters"
    ### START CODE HERE ### (≈ 4 lines of code)
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    ### END CODE HERE ###

    # Implement Forward Propagation to calculate A2 (probabilities)
    ### START CODE HERE ### (≈ 4 lines of code)
    Z1 = W1 @ X + b1    # hidden-layer pre-activation
    A1 = np.tanh(Z1)    # hidden-layer activation
    Z2 = W2 @ A1 + b2   # output-layer pre-activation
    A2 = sigmoid(Z2)    # predicted probabilities, one per example
    ### END CODE HERE ###

    assert A2.shape == (1, X.shape[1])

    # Cache intermediate values for use in backpropagation.
    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}

    return A2, cache
# Run a forward pass on the provided test case; the means condense the
# arrays into four numbers that are easy to compare with the expected output.
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> 0.262818640198 0.091999045227 -1.30766601287 0.212877681719 </td>
</tr>
</table>
Now that you have computed $A^{[2]}$ (in the Python variable "`A2`"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:
$$J = - \frac{1}{m} \sum\limits_{i = 1}^{m} \large{(} \small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large{)} \small\tag{13}$$
**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.
**Instructions**:
- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented
$- \sum\limits_{i=0}^{m} y^{(i)}\log(a^{[2](i)})$:
```python
logprobs = np.multiply(np.log(A2),Y)
cost = - np.sum(logprobs) # no need to use a for loop!
```
(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`).
Note that if you use `np.multiply` followed by `np.sum` the end result will be a type `float`, whereas if you use `np.dot`, the result will be a 2D numpy array. We can use `np.squeeze()` to remove redundant dimensions (in the case of single float, this will be reduced to a zero-dimension array). We can cast the array as a type `float` using `float()`.
```
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
    """
    Computes the cross-entropy cost given in equation (13)

    Arguments:
    A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)
    parameters -- python dictionary containing your parameters W1, b1, W2 and b2
    [Note that the parameters argument is not used in this function,
    but the auto-grader currently expects this parameter.
    Future version of this notebook will fix both the notebook
    and the auto-grader so that `parameters` is not needed.
    For now, please include `parameters` in the function signature,
    and also when invoking this function.]

    Returns:
    cost -- cross-entropy cost given equation (13)
    """

    m = Y.shape[1] # number of examples

    # Compute the cross-entropy cost
    ### START CODE HERE ### (≈ 2 lines of code)
    # Fix: equation (13) needs BOTH terms and the 1/m average. The previous
    # code dropped the (1 - Y) * log(1 - A2) term and never divided by m,
    # so the printed cost could not match the expected 0.693... value.
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
    cost = -np.sum(logprobs) / m
    ### END CODE HERE ###

    cost = float(np.squeeze(cost))  # makes sure cost is the dimension we expect.
                                    # E.g., turns [[17]] into 17
    assert(isinstance(cost, float))

    return cost
# Check compute_cost on the provided test case (expected cost ≈ 0.693...).
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
```
**Expected Output**:
<table style="width:20%">
<tr>
<td>**cost**</td>
<td> 0.693058761... </td>
</tr>
</table>
Using the cache computed during forward propagation, you can now implement backward propagation.
**Question**: Implement the function `backward_propagation()`.
**Instructions**:
Backpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation.
<img src="images/grad_summary.png" style="width:600px;height:300px;">
<!--
$\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } = \frac{1}{m} (a^{[2](i)} - y^{(i)})$
$\frac{\partial \mathcal{J} }{ \partial W_2 } = \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } a^{[1] (i) T} $
$\frac{\partial \mathcal{J} }{ \partial b_2 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)}}}$
$\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } = W_2^T \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $
$\frac{\partial \mathcal{J} }{ \partial W_1 } = \frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } X^T $
$\frac{\partial \mathcal{J} _i }{ \partial b_1 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)}}}$
- Note that $*$ denotes elementwise multiplication.
- The notation you will use is common in deep learning coding:
- dW1 = $\frac{\partial \mathcal{J} }{ \partial W_1 }$
- db1 = $\frac{\partial \mathcal{J} }{ \partial b_1 }$
- dW2 = $\frac{\partial \mathcal{J} }{ \partial W_2 }$
- db2 = $\frac{\partial \mathcal{J} }{ \partial b_2 }$
!-->
- Tips:
- To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. So you can compute
$g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.
```
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
    """
    Implement the backward propagation using the instructions above.

    Arguments:
    parameters -- python dictionary containing our parameters
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
    X -- input data of shape (2, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    grads -- python dictionary containing your gradients with respect to different parameters
    """
    m = X.shape[1]

    # First, retrieve W1 and W2 from the dictionary "parameters".
    ### START CODE HERE ### (≈ 2 lines of code)
    W1, W2 = parameters["W1"], parameters["W2"]
    ### END CODE HERE ###

    # Retrieve also A1 and A2 from dictionary "cache".
    ### START CODE HERE ### (≈ 2 lines of code)
    A1, A2 = cache["A1"], cache["A2"]
    ### END CODE HERE ###

    # Backward propagation: calculate dW1, db1, dW2, db2.
    ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
    inv_m = 1 / m
    dZ2 = A2 - Y                                    # sigmoid + cross-entropy derivative
    dW2 = inv_m * (dZ2 @ A1.T)
    db2 = inv_m * dZ2.sum(axis=1, keepdims=True)
    dZ1 = (W2.T @ dZ2) * (1 - A1 ** 2)              # tanh'(z) = 1 - a^2
    dW1 = inv_m * (dZ1 @ X.T)
    db1 = inv_m * dZ1.sum(axis=1, keepdims=True)
    ### END CODE HERE ###

    grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}

    return grads
# Sanity-check backward_propagation on the provided test case and print each
# gradient for comparison against the expected output table below.
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td>**dW1**</td>
<td> [[ 0.00301023 -0.00747267]
[ 0.00257968 -0.00641288]
[-0.00156892 0.003893 ]
[-0.00652037 0.01618243]] </td>
</tr>
<tr>
<td>**db1**</td>
<td> [[ 0.00176201]
[ 0.00150995]
[-0.00091736]
[-0.00381422]] </td>
</tr>
<tr>
<td>**dW2**</td>
<td> [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]] </td>
</tr>
<tr>
<td>**db2**</td>
<td> [[-0.16655712]] </td>
</tr>
</table>
**Question**: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).
**General gradient descent rule**: $ \theta = \theta - \alpha \frac{\partial J }{ \partial \theta }$ where $\alpha$ is the learning rate and $\theta$ represents a parameter.
**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). Images courtesy of Adam Harley.
<img src="images/sgd.gif" style="width:400;height:400;"> <img src="images/sgd_bad.gif" style="width:400;height:400;">
```
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
    """
    Updates parameters using the gradient descent update rule given above

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    # One gradient-descent step per parameter: theta <- theta - alpha * dtheta.
    # Each gradient lives in `grads` under the same key prefixed with "d".
    ### START CODE HERE ### (≈ 4 lines of code)
    parameters = {
        key: parameters[key] - learning_rate * grads["d" + key]
        for key in ("W1", "b1", "W2", "b2")
    }
    ### END CODE HERE ###

    return parameters
# Apply one update step on the provided test case and print the new
# parameters for comparison with the expected output.
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table style="width:80%">
<tr>
<td>**W1**</td>
<td> [[-0.00643025 0.01936718]
[-0.02410458 0.03978052]
[-0.01653973 -0.02096177]
[ 0.01046864 -0.05990141]]</td>
</tr>
<tr>
<td>**b1**</td>
<td> [[ -1.02420756e-06]
[ 1.27373948e-05]
[ 8.32996807e-07]
[ -3.20136836e-06]]</td>
</tr>
<tr>
<td>**W2**</td>
<td> [[-0.01041081 -0.04463285 0.01758031 0.04747113]] </td>
</tr>
<tr>
<td>**b2**</td>
<td> [[ 0.00010457]] </td>
</tr>
</table>
### 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() ####
**Question**: Build your neural network model in `nn_model()`.
**Instructions**: The neural network model has to use the previous functions in the right order.
```
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
    """
    Train a one-hidden-layer neural network with plain gradient descent.

    Arguments:
    X -- dataset of shape (2, number of examples)
    Y -- labels of shape (1, number of examples)
    n_h -- size of the hidden layer
    num_iterations -- Number of iterations in gradient descent loop
    print_cost -- if True, print the cost every 1000 iterations

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(3)
    # Input and output layer sizes come from the data; n_h is user-chosen.
    dims = layer_sizes(X, Y)
    n_x, n_y = dims[0], dims[2]

    # Random initialisation of W1/b1/W2/b2
    parameters = initialize_parameters(n_x, n_h, n_y)

    # Gradient-descent loop: forward pass -> cost -> backward pass -> update
    for iteration in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)
        cost = compute_cost(A2, Y, parameters)
        grads = backward_propagation(parameters, cache, X, Y)
        parameters = update_parameters(parameters, grads, learning_rate = 1.2)

        # Report progress every 1000 iterations when requested
        if print_cost and iteration % 1000 == 0:
            print ("Cost after iteration %i: %f" %(iteration, cost))

    return parameters
# Train on the course-provided fixture and print the learned parameters
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table style="width:90%">
<tr>
<td>
**cost after iteration 0**
</td>
<td>
0.692739
</td>
</tr>
<tr>
<td>
<center> $\vdots$ </center>
</td>
<td>
<center> $\vdots$ </center>
</td>
</tr>
<tr>
<td>**W1**</td>
<td> [[-0.65848169 1.21866811]
[-0.76204273 1.39377573]
[ 0.5792005 -1.10397703]
[ 0.76773391 -1.41477129]]</td>
</tr>
<tr>
<td>**b1**</td>
<td> [[ 0.287592 ]
[ 0.3511264 ]
[-0.2431246 ]
[-0.35772805]] </td>
</tr>
<tr>
<td>**W2**</td>
<td> [[-2.45566237 -3.27042274 2.00784958 3.36773273]] </td>
</tr>
<tr>
<td>**b2**</td>
<td> [[ 0.20459656]] </td>
</tr>
</table>
### 4.5 Predictions
**Question**: Use your model to predict by building predict().
Use forward propagation to predict results.
**Reminder**: predictions = $y_{prediction} = \mathbb 1 \text{{activation > 0.5}} = \begin{cases}
1 & \text{if}\ activation > 0.5 \\
0 & \text{otherwise}
\end{cases}$
As an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```
```
# GRADED FUNCTION: predict
def predict(parameters, X):
    """
    Using the learned parameters, predicts a class for each example in X

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (n_x, m)

    Returns
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Computes probabilities using forward propagation, and classifies to 0/1
    # using 0.5 as the threshold (activation >= 0.5 -> class 1).
    A2, cache = forward_propagation(X, parameters)
    # Vectorized thresholding (the idiom suggested in the notebook hint)
    # replaces the original per-element loop; .astype(float) matches the
    # float dtype of the np.zeros-based loop output.
    # assumes A2 has shape (1, m) as produced by the 2-layer network
    predictions = (A2 >= 0.5).astype(float)
    return predictions
# Check predict() on the course-provided fixture
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td>**predictions mean**</td>
<td> 0.666666666667 </td>
</tr>
</table>
It is time to run the model and see how it performs on a planar dataset. Run the following code to test your model with a single hidden layer of $n_h$ hidden units.
```
# Build a model with a n_h-dimensional hidden layer and plot its
# decision boundary on the planar dataset
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td>**Cost after iteration 9000**</td>
<td> 0.218607 </td>
</tr>
</table>
```
# Print accuracy
predictions = predict(parameters, X)
# Y·pred^T counts true positives, (1-Y)·(1-pred)^T true negatives;
# dividing by Y.size gives the fraction of correct labels
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
```
**Expected Output**:
<table style="width:15%">
<tr>
<td>**Accuracy**</td>
<td> 90% </td>
</tr>
</table>
Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression.
Now, let's try out several hidden layer sizes.
### 4.6 - Tuning hidden layer size (optional/ungraded exercise) ###
Run the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.
```
# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
# Train one model per hidden-layer size, plot its boundary and print accuracy
for i, n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5, 2, i+1)
    plt.title('Hidden Layer of size %d' % n_h)
    parameters = nn_model(X, Y, n_h, num_iterations = 5000)
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
    print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
```
**Interpretation**:
- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data.
- The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fit the data well without incurring noticeable overfitting.
- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting.
**Optional questions**:
**Note**: Remember to submit the assignment by clicking the blue "Submit Assignment" button at the upper-right.
Some optional/ungraded questions that you can explore if you wish:
- What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?
- Play with the learning_rate. What happens?
- What if we change the dataset? (See part 5 below!)
<font color='blue'>
**You've learnt to:**
- Build a complete neural network with a hidden layer
- Make good use of a non-linear unit
- Implement forward propagation and backpropagation, and train a neural network
- See the impact of varying the hidden layer size, including overfitting.
Nice work!
## 5) Performance on other datasets
If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.
```
# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###

# Reshape to the (features, examples) / (1, examples) layout the model expects
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])

# make blobs binary
if dataset == "blobs":
    Y = Y%2

# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
```
Congrats on finishing this Programming Assignment!
Reference:
- http://scs.ryerson.ca/~aharley/neural-networks/
- http://cs231n.github.io/neural-networks-case-study/
| github_jupyter |
# EDA classification
aim: When will a project succeed?
Which features influence the success of a project?
#### assumptions
* the higher the goal, the lower the probability for success
* the longer the duration the higher the probability for success
* the longer the preparation time the higher the probability for success
* the month of launch influences the probability for success
* the country influences the probability for success
* pledged amount per backer influences the probability for success
```
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# read dataframe in (preprocessed Kickstarter projects)
df = pd.read_csv('data/kickstarter_preprocess.csv')

# first summary
df.shape
df.info()
df.describe()
df.head()

# overview how many projects were successful, failed, canceled
df['state'].hist();

# make three new dataframes: one for success, one for failed and the last for canceled
df_suc = df.query('state == "successful"')
df_fai = df.query('state == "failed"')
df_can = df.query('state == "canceled"')
```
### assumption 1: the higher the goal, the lower the probability for success
```
# Distribution of funding goals among successful projects
# (log scale: goals span several orders of magnitude)
plt.boxplot(df_suc['goal'])
plt.yscale('log');
# how many projects sit at the extremes of the goal range
df_suc.query('goal < 1').shape
df_suc.query('goal >= 500000').shape
# remove outlier
#df_suc.drop(df_suc[df_suc['goal'] > 100000].index, inplace=True)
# Split each state into three goal buckets (low / mid / high); the .shape
# outputs below feed the hard-coded bar chart in the next cell.
df_suc1 = df_suc.query('goal <= 1500')
#plt.boxplot(df_suc1['goal'])
df_suc1.shape
df_suc2 = df_suc.query('1500 < goal < 7000')
#plt.boxplot(df_suc2['goal'])
df_suc2.shape
df_suc3 = df_suc.query('goal >= 7000')
#plt.boxplot(df_suc3['goal'])
df_suc3.shape
df_fai1 = df_fai.query('goal <= 1500')
df_fai1.shape
df_fai2 = df_fai.query('1500 < goal < 7000')
df_fai2.shape
df_fai3 = df_fai.query('goal >= 7000')
df_fai3.shape
df_can1 = df_can.query('goal <= 1500')
df_can1.shape
df_can2 = df_can.query('1500 < goal < 7000')
df_can2.shape
df_can3 = df_can.query('goal >= 7000')
df_can3.shape
# making a categorical variable for goal 0='goal <= 1500' 1='1500 < goal < 7000', 2='goal >= 7000'
#df.loc[df['goal'] <= 1500, 'goal_split'] = 0
#df.loc[(df['goal'] > 1500) & (df['goal'] < 7000), 'goal_split'] = 1
#df.loc[df['goal'] >= 7000, 'goal_split'] = 2
#sns.barplot(x='goal_split', y=None, hue="state", data=df)
#sns.barplot(x='goal_split', y=None, hue="state", data=df)
# set width of bar
barWidth = 0.25
fig = plt.subplots(figsize =(12, 8))
# set height of bar -- counts hard-coded from the .shape outputs of the
# goal-bucket queries above (presumably; re-run those cells to confirm)
suc = [29467, 35129, 29650]
fai = [13763, 22526, 37909]
can = [1656, 2502, 4460]
# Set position of bar on X axis: three side-by-side bars per goal bucket
br1 = np.arange(3)
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
# NOTE(review): tick_label expects one label per bar and is overridden by
# plt.xticks below, so these string arguments have no visible effect.
p1 = plt.bar(br1, suc, color ='g', width = barWidth,
edgecolor ='grey', tick_label ='success')
p2 = plt.bar(br2, fai, color ='r', width = barWidth,
edgecolor ='grey', tick_label ='failed')
p3 = plt.bar(br3, can, color ='b', width = barWidth,
edgecolor ='grey', tick_label ='canceled')
# Adding Xticks
plt.xlabel('goal_split', fontweight ='bold')
plt.ylabel('count', fontweight ='bold')
plt.xticks([r + barWidth for r in range(3)],
['goal <= 1500', '1500 < goal < 7000', 'goal >= 7000'])
plt.legend((p1[0], p2[0], p3[0]), ('success', 'failed', 'canceled'))
plt.show()
#df1 = df.query('goal <= 1500')
#df2 = df.query('1500 < goal < 7000')
#df3 = df.query('goal >= 7000')
```
## conclusion
the lower the goal the higher the probability for success
```
# Violin plot of goal for the successful-projects frame
sns.violinplot(x ="state", y ="goal", data = df_suc);
var1 = 'state'
data1 = pd.concat([df_suc['goal'], df_suc[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
# scale="count": violin width proportional to the number of observations
fig1 = sns.violinplot(x=var1, y="goal", data=data1, scale="count")
fig1.axis(ymin=0, ymax=100000);
#plt.yscale('log')
# extreme goals across all states
df.query('goal <= 1').shape
df.query('goal >= 1000000').shape
df.query('goal >= 1000000 and state == "successful"').shape
# scatter of state vs. duration (categorical x-axis)
sta_dur = df.plot(x='state',
y='duration_days',
kind='scatter')
```
### assumption 2: the longer the duration the higher the probability for success
```
plt.boxplot(df['duration_days']);
# Row counts per duration bucket, overall and per project state; the
# .shape outputs feed the hard-coded bar chart in the next cell.
dur = [df.query('duration_days <= 20').shape, df.query('20 < duration_days <= 30').shape,
df.query('30 < duration_days <= 40').shape, df.query('duration_days > 40').shape]
dur
dur1 = [df_suc.query('duration_days <= 20').shape, df_suc.query('20 < duration_days <= 30').shape,
df_suc.query('30 < duration_days <= 40').shape, df_suc.query('duration_days > 40').shape]
dur1
dur2 = [df_fai.query('duration_days <= 20').shape, df_fai.query('20 < duration_days <= 30').shape,
df_fai.query('30 < duration_days <= 40').shape, df_fai.query('duration_days > 40').shape]
dur2
dur3 = [df_can.query('duration_days <= 20').shape, df_can.query('20 < duration_days <= 30').shape,
df_can.query('30 < duration_days <= 40').shape, df_can.query('duration_days > 40').shape]
dur3
# set width of bar
barWidth = 0.25
fig = plt.subplots(figsize =(12, 8))
# Bar heights: counts per duration bucket, hard-coded from the .shape
# outputs of the dur1/dur2/dur3 queries above.
suc = [12052, 55097, 15513, 12116]
fai = [6150, 43036, 8119, 16893]
can = [653, 4661, 1163, 2141]
# Set position of bar on X axis: three side-by-side bars per bucket
br1 = np.arange(4)
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
# Dropped the tick_label arguments of the original: a scalar tick_label is
# applied to every bar and was immediately overwritten by plt.xticks below.
p1 = plt.bar(br1, suc, color ='g', width = barWidth, edgecolor ='grey')
p2 = plt.bar(br2, fai, color ='r', width = barWidth, edgecolor ='grey')
p3 = plt.bar(br3, can, color ='b', width = barWidth, edgecolor ='grey')
# Adding Xticks (bug fix: the x-axis shows duration buckets, not the
# goal split of the previous figure -- the label was copy-pasted)
plt.xlabel('duration_days split', fontweight ='bold')
plt.ylabel('count', fontweight ='bold')
plt.xticks([r + barWidth for r in range(4)],
           ['duration_days <= 20', '20 < duration_days <= 30', '30 < duration_days <= 40', 'duration_days > 40'])
plt.legend((p1[0], p2[0], p3[0]), ('success', 'failed', 'canceled'))
plt.show()
```
### assumption 3: the longer the preparation time the higher the probability for success
### assumption 4: the month of launch influences the probability for success
```
# scatter of launch month vs. state (both effectively categorical)
sta_month = df.plot(x='launched_month',
y='state',
kind='scatter')
# boxplot sqft_living (small houses, big houses, bad neighborhood)
# NOTE(review): the comment above looks copy-pasted from a housing notebook;
# the plots below actually show duration_days per project state.
fig, axes = plt.subplots(ncols=3,sharex=True,sharey=True,figsize=(9,6))
ax1 = df_suc.boxplot(column=['duration_days'],ax=axes[0])
ax1.set_title("duration_days successful", fontsize = 10)
ax1.set_ylabel("count");
ax2 = df_fai.boxplot(column=['duration_days'],ax=axes[1])
ax2.set_title("duration_days failed", fontsize = 10);
ax3 = df_can.boxplot(column=['duration_days'],ax=axes[2]);
ax3.set_title("duration_days canceled", fontsize = 10);
# central tendency of duration per state
df_suc['duration_days'].mean(), df_suc['duration_days'].median()
df_fai['duration_days'].mean(), df_fai['duration_days'].median()
df_can['duration_days'].mean(), df_can['duration_days'].median()
# NOTE(review): 'successful' appears to be a *value* of the state column,
# not a column name -- this line likely raises a KeyError; confirm the
# intended column.
df.groupby('state').count()['successful']
```
### assumption 5: the country influences the probability for success
```
# Project counts per country for each outcome
cou_suc = df_suc.groupby(['country'])['country'].count()
cou_fai = df_fai.groupby(['country'])['country'].count()
cou_can = df_can.groupby(['country'])['country'].count()
# Bug fix: pd.merge() joins only two frames at a time -- the third
# positional argument is `how`, so the original three-way merge raised a
# TypeError. Concatenate the three per-country counts side by side
# instead; countries missing from an outcome get NaN.
pd.concat([cou_suc, cou_fai, cou_can], axis=1,
          keys=['country successful', 'country failed', 'country canceled'])
#df['country'].unique()
cou_can
# Histograms of per-country project counts for each outcome
fig, axes = plt.subplots(ncols=3,sharex=True,sharey=True,figsize=(15,6))
# NOTE(review): cou_* are Series; Series.hist has no `column` parameter in
# modern pandas -- verify against the pandas version in use.
ax1 = cou_suc.hist(column=['country'],ax=axes[0])
axes[0].set_title('country successful')
axes[0].set_xlabel('country')
axes[0].set_ylabel('count')
ax2 = cou_fai.hist(column=['country'],ax=axes[1])
axes[1].set_title('country failed')
axes[1].set_xlabel('country')
axes[1].set_ylabel('count')
ax3 = cou_can.hist(column=['country'],ax=axes[2])
axes[2].set_title('country canceled')
axes[2].set_xlabel('country')
axes[2].set_ylabel('count');
# bar charts of project counts per country
cou_suc.plot(kind='bar');
cou_fai.plot(kind='bar');
cou_can.plot(kind='bar');
# pie chart of successful projects per country
cou = df_suc.groupby('country')['country'].count()
cou = list(cou)
cou1 = df['country'].unique()
# Creating plot
fig = plt.figure(figsize =(10, 7))
# NOTE(review): `cou` is ordered by sorted country name (groupby) while
# `cou1` preserves first-appearance order -- the labels may not line up
# with the slices; verify.
plt.pie(cou, labels = cou1)
# show plot
plt.show()
country_suc = df_suc.groupby(df_suc['country'])
#pledged = amt_pledged.sum().sort_values(ascending=0)[0:10]
# NOTE(review): GroupBy.plot draws one plot per group and returns a Series
# of axes, so the set_* calls below may fail -- confirm the intent.
ax = country_suc.plot(kind="bar")
ax.set_title("Amount by Country")
ax.set_ylabel("Amount")
ax.set_xlabel("Country")
vals = ax.get_yticks()
```
### assumption 6: pledged amount per backer influences the probability for success
```
# mean pledged amount per backer, by project state
df.groupby('state').pledged_per_backer.mean()
# staff_pick counts per outcome
df_suc.groupby('staff_pick').count()
df_fai.groupby('staff_pick').count()
df_can.groupby('staff_pick').count()
# whether the category appears in the slug, per outcome
df_suc['cat_in_slug'].hist()
df_fai['cat_in_slug'].hist()
df_can['cat_in_slug'].hist()
sns.catplot(x = "cat_in_slug", kind = 'count', hue="state", data=df);
#sns.barplot(x='cat_in_slug', hue='state', data=df)
#df.groupby('cat_in_slug').plot(x='state', kind='bar')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import wkw
import torch
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
# imports from genem package
from genem.util import viewData,gpu
# Get the empty gpu
gpu.get_empty_gpu()
# Log directory
log_root = './__logs__/'
# The mag 1 of this dataset points to mag 8-8-1 in the original dataset
path_wkw = '/tmpscratch/webknossos/Connectomics_Department/2018-11-13_scMS109_1to7199_v01_l4_06_24_fixed_mag8/color/1/'
# Open the dataset for reading
dataset = wkw.Dataset.open(path_wkw)
# Bounding box taken out of the datasource.json
dataset_bbox = 19500, 15250, 9, 11250, 11250,7168
# Read a single plane (302x302 patch, 7000 slices deep)
emData = dataset.read([19500, 15250, 9], [302, 302, 7000])
# View the data
viewData.ImageSliceViewer3D(emData.squeeze(0))
# Transform data so that the third dimension would be the first so that each slice would be an individual sample
correctedDimforPytorch = emData.squeeze(0).transpose((2,0,1))
viewData.ImageSliceViewer3D(correctedDimforPytorch)
# Normalize data to zero mean / unit std (global statistics over the stack)
data_floatType = correctedDimforPytorch.astype(np.single)
data_normalized = np.asarray((data_floatType - data_floatType.mean())/data_floatType.std())
# add a singleton channel axis for Conv2d: (N, 1, H, W)
data_expanded = np.expand_dims(data_normalized,1)
data_expanded.shape,data_expanded.mean(),data_expanded.std()
viewData.ImageSliceViewer3D(data_normalized)
torch.from_numpy(data_expanded).shape
# create pytorch data loader from the numpy array
dataSet = data.TensorDataset(torch.from_numpy(data_expanded))
trainloader = data.DataLoader(dataSet, batch_size=32, shuffle=True, num_workers=2)
# sanity check: iterate once and print batch shapes
for i,curTensor in enumerate(trainloader):
    print(curTensor[0].shape)
# Code copied from the autoencoder script
def get_conv_pad(input_size, kernel_size, stride):
padding = np.ceil(((stride-1)*input_size-stride+kernel_size)/2).astype(int)
return padding
def data2fig_subplot(inputs, outputs, idx):
    """Plot input vs. reconstruction for sample `idx`; return the figure."""
    fig, (ax_in, ax_out) = plt.subplots(1, 2, figsize=(16, 12))
    # left panel: network input; right panel: network output
    for ax, batch in ((ax_in, inputs), (ax_out, outputs)):
        img = batch[idx].data.cpu().numpy().squeeze()
        ax.imshow(img, cmap='gray')
    return fig
class Encoder_4_sampling_bn(torch.nn.Module):
    """Convolutional encoder: 4 x (conv -> [BN] -> ReLU -> 2x2 maxpool), then FC.

    For the default 302x302 input with kernel 3 / stride 1 / no padding,
    each conv+pool stage roughly halves the spatial size, ending at 17x17
    before the fully connected layer that maps to the latent vector.
    """

    def __init__(self, input_size, kernel_size, stride, n_fmaps, n_latent):
        super().__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.n_fmaps = n_fmaps
        self.n_latent = n_latent

        # first stage has no batch norm (matches the original design)
        self.encoding_conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, n_fmaps, kernel_size, stride),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.encoding_conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(n_fmaps, n_fmaps*2, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps*2),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.encoding_conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(n_fmaps*2, n_fmaps*4, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps*4),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.encoding_conv4 = torch.nn.Sequential(
            torch.nn.Conv2d(n_fmaps*4, n_fmaps*8, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps*8),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        # 17x17 spatial size is hard-coded to the 302x302 input geometry
        self.encoding_fc = torch.nn.Sequential(
            torch.nn.Linear(17**2*n_fmaps*8, n_latent),
            torch.nn.ReLU())

    def forward(self, x):
        x = self.encoding_conv1(x)
        x = self.encoding_conv2(x)
        x = self.encoding_conv3(x)
        x = self.encoding_conv4(x)
        # Bug fix: use self.n_fmaps instead of the module-level global
        # `n_fmaps`, so the reshape matches this instance's configuration
        # (the original only worked because the global happened to agree).
        x = self.encoding_fc(x.reshape((-1, 1, 17**2*self.n_fmaps*8)))
        return x
class Decoder_4_sampling_bn(torch.nn.Module):
    """Convolutional decoder mirroring Encoder_4_sampling_bn.

    A fully connected layer expands the latent vector to a 17x17 feature
    map with n_fmaps*8 channels; four (2x upsample -> transposed conv ->
    [BN] -> ReLU) stages then grow it back to the image resolution.
    """

    def __init__(self, output_size, kernel_size, stride, n_fmaps, n_latent):
        super().__init__()
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.n_fmaps = n_fmaps
        self.n_latent = n_latent

        self.decoding_fc = torch.nn.Sequential(
            torch.nn.Linear(n_latent, 17**2*n_fmaps*8),
            torch.nn.ReLU())
        self.decoding_convt1 = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=2, mode='nearest'),
            torch.nn.ConvTranspose2d(n_fmaps*8, n_fmaps*4, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps*4),
            torch.nn.ReLU())
        self.decoding_convt2 = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=2, mode='nearest'),
            torch.nn.ConvTranspose2d(n_fmaps*4, n_fmaps*2, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps*2),
            torch.nn.ReLU())
        self.decoding_convt3 = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=2, mode='nearest'),
            torch.nn.ConvTranspose2d(n_fmaps*2, n_fmaps, kernel_size, stride),
            torch.nn.BatchNorm2d(n_fmaps),
            torch.nn.ReLU())
        # final stage: no batch norm / activation, raw reconstruction
        self.decoding_convt4 = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=2, mode='nearest'),
            torch.nn.ConvTranspose2d(n_fmaps, 1, kernel_size, stride))

    def forward(self, x):
        x = self.decoding_fc(x)
        # Bug fix: reshape with self.n_fmaps instead of the module-level
        # global `n_fmaps`, so the layout matches this instance's
        # configuration (the original only worked because the global
        # happened to agree).
        x = self.decoding_convt1(x.reshape((-1, self.n_fmaps*8, 17, 17)))
        x = self.decoding_convt2(x)
        x = self.decoding_convt3(x)
        x = self.decoding_convt4(x)
        return x
class AE(torch.nn.Module):
    """Autoencoder wrapper: chains an encoder and a decoder."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        # encode to the latent space, then reconstruct
        return self.decoder(self.encoder(x))
# Network / training hyperparameters
input_size=302
output_size=input_size
kernel_size=3
stride=1
n_fmaps=8
n_latent=5000
net = AE(
    Encoder_4_sampling_bn(input_size, kernel_size, stride, n_fmaps, n_latent),
    Decoder_4_sampling_bn(output_size, kernel_size, stride, n_fmaps, n_latent))
# Reconstruction loss + plain SGD with momentum
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.02, momentum=0.9)
# Copy network to cuda
#net = net.cuda();
# TensorBoard logging: write every writer_int batches
writer = SummaryWriter(log_root)
writer_int = 5
#epoch_writer_int = 5
n_epoch = 50000
it = 0
for epoch in range(n_epoch): # loop over the dataset multiple times
    running_loss = 0.0
    for i, curData in enumerate(trainloader):
        it += 1
        # TensorDataset yields 1-tuples; autoencoder target is the input itself
        inputs = curData[0]
        #inputs = inputs.cuda();
        labels = inputs
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        # every writer_int batches: log loss and an input/reconstruction figure
        if (i+1) % writer_int == 0:
            avg_loss = running_loss / writer_int
            #if (epoch+1) % epoch_writer_int == 0:
            print('it: {} (epoch: {}, batch: {}), running loss: {:0.3f}'.format(it, epoch, i+1, avg_loss))
            writer.add_scalar('loss', loss.item(), it)
            writer.add_scalar('avg_loss', avg_loss, it)
            writer.add_figure('inputs', data2fig_subplot(inputs, outputs, 0), it)
            running_loss = 0.0
print(len(trainloader))
enumerate(trainloader)
# inspect one reconstruction from the last processed batch
data2fig_subplot(inputs, outputs, 11);
```
| github_jupyter |
---
_You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
---
# Assignment 2 - Pandas Introduction
All questions are weighted the same in this assignment.
## Part 1
The following code loads the olympics dataset (olympics.csv), which was derived from the Wikipedia entry on [All Time Olympic Games Medals](https://en.wikipedia.org/wiki/All-time_Olympic_Games_medal_table), and does some basic data cleaning.
The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # number of games, total # of medals. Use this dataset to answer the questions below.
```
import numpy
import pandas as pd
import pandas  # NOTE(review): duplicate of the `pd` import above; some cells below use the full module name
# skiprows=1 skips the spanned header row; index_col=0 indexes by country
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
data = df
data.describe()
data.head()
```
First the unicode is removed from the column names and the numbered column names are changed to their meaning.
```
# Rename the numbered medal columns (01/02/03 -> Gold/Silver/Bronze) and
# replace the unicode '№' prefix with '#'
for col in df.columns:
    if col[:2]=='01':
        df.rename(columns={col:'Gold'+col[4:]}, inplace=True)
    if col[:2]=='02':
        df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
    if col[:2]=='03':
        df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
    if col[:1]=='№':
        df.rename(columns={col:'#'+col[1:]}, inplace=True)
data.head()
# Split "Country (ABC)" index entries into a clean country-name index
# plus a 3-letter ID column, and drop the Totals row
names_ids = df.index.str.split('\s\(') # split the index by '('
df.index = names_ids.str[0] # the [0] element is the country name (new index)
df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)
df = df.drop('Totals')
df.head()
```
### Question 0 (Example)
What is the first country in df?
*This function should return a Series.*
```
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
    # This function returns the row for Afghanistan, which is a Series object. The assignment
    # question description will tell you the general format the autograder is expecting
    return df.iloc[0]
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
data = df
# Column-name aliases: bare name = summer games, '.1' = winter, '.2' = combined
summer_gold = "Gold"
winter_gold = "Gold.1"
total_gold = "Gold.2"
total_silver = "Silver.2"
total_bronze = "Bronze.2"
```
### Question 1
Which country has won the most gold medals in summer games?
*This function should return a single string value.*
```
def answer_one():
    """Return the name of the country with the most summer gold medals.

    Bug fix: uses idxmax() (index label of the maximum) instead of
    argmax() -- in modern pandas Series.argmax() returns the *positional*
    index, an int rather than the expected country-name string.
    """
    return data[summer_gold].idxmax()
answer_one()
```
### Question 2
Which country had the biggest difference between their summer and winter gold medal counts?
*This function should return a single string value.*
```
def answer_two():
    """Return the country with the biggest |summer - winter| gold difference.

    Bug fix: uses idxmax() (index label) instead of argmax(), which in
    modern pandas returns a positional int rather than the country name.
    """
    return (data[summer_gold] - data[winter_gold]).abs().idxmax()
answer_two()
```
### Question 3
Which country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count?
$$\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$
Only include countries that have won at least 1 gold in both summer and winter.
*This function should return a single string value.*
```
def answer_three():
    """Country with the biggest |summer - winter| gold difference relative
    to its total gold count, among countries with at least one gold in
    both seasons.

    Bug fix: uses idxmax() (index label) instead of argmax(), which in
    modern pandas returns a positional int rather than the country name.
    """
    elegible = data[(data[summer_gold]>=1) & (data[winter_gold]>=1)]
    ratios = (elegible[summer_gold] - elegible[winter_gold]).abs()/elegible[total_gold]
    return ratios.idxmax()
answer_three()
```
### Question 4
Write a function that creates a Series called "Points" which is a weighted value where each gold medal (`Gold.2`) counts for 3 points, silver medals (`Silver.2`) for 2 points, and bronze medals (`Bronze.2`) for 1 point. The function should return only the column (a Series object) which you created.
*This function should return a Series named `Points` of length 146*
```
def answer_four():
    """
    Creates weighted points based on medals

     * Gold: 3 points
     * Silver: 2 points
     * Bronze: 1 point

    Returns
    -------
    Series: column of points for each NOC, named "Points" (length 146)
    """
    # Weighted sum of the combined (".2") medal columns; 3.0/2.0 keep the
    # float dtype of the original numpy.zeros-accumulator implementation.
    points = data[total_gold] * 3.0 + data[total_silver] * 2.0 + data[total_bronze]
    # Bug fix: the assignment requires the returned Series to be *named*
    # "Points"; the original returned an unnamed Series.
    return points.rename("Points")
points = answer_four()
# spot-check: United States weighted total from the medal table
assert points.loc["United States"] == 5684
```
## Part 2
For the next set of questions, we will be using census data from the [United States Census Bureau](http://www.census.gov/popest/data/counties/totals/2015/CO-EST2015-alldata.html). Counties are political and geographic subdivisions of states in the United States. This dataset contains population data for counties and states in the US from 2010 to 2015. [See this document](http://www.census.gov/popest/data/counties/totals/2015/files/CO-EST2015-alldata.pdf) for a description of the variable names.
The census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.
### Question 5
Which state has the most counties in it? (hint: consider the sumlevel key carefully! You will need this for future questions too...)
*This function should return a single string value.*
```
# County/state population estimates from the US Census Bureau
census_df = pd.read_csv('census.csv')
census_df.head()
census_data = census_df
class CensusVariables:
    """Column names and constants for the census dataset."""
    # identification columns
    state_name = "STNAME"
    county_name = "CTYNAME"
    region = "REGION"
    # SUMLEV value that marks county-level (as opposed to state-level) rows
    summary_level = "SUMLEV"
    county_level = 50
    # population columns
    census_population = "CENSUS2010POP"
    population_2014 = "POPESTIMATE2014"
    population_2015 = "POPESTIMATE2015"
    # the six yearly estimate columns, 2010 through 2015, in order
    population_estimates = ["POPESTIMATE2010",
                            "POPESTIMATE2011",
                            "POPESTIMATE2012",
                            "POPESTIMATE2013",
                            population_2014,
                            population_2015]
# County-level rows only (SUMLEV == 50 excludes state summary rows)
counties = census_data[census_data[
    CensusVariables.summary_level]==CensusVariables.county_level]
# this throws off the numeric index for the argmax method so reset it
counties = counties.reset_index()
# but the last question wants the original index
counties_original_index = census_data[census_data[
    CensusVariables.summary_level]==CensusVariables.county_level]
def answer_five():
    """finds state with the most counties

    Bug fix: uses idxmax() (index label, i.e. the state name) instead of
    argmax(), which in modern pandas returns a positional int.

    Returns
    -------
    str: name of state with the most counties
    """
    county_counts = counties.groupby(
        CensusVariables.state_name).count().COUNTY
    return county_counts.idxmax()
answer_five()
```
### Question 6
Only looking at the three most populous counties for each state, what are the three most populous states (in order of highest population to lowest population)? Use `CENSUS2010POP`.
*This function should return a list of string values.*
```
def answer_six():
    """finds three most populous states based on top three counties in each

    Returns
    -------
    List: top three state-names (highest to lowest)
    """
    # three biggest counties (2010 census population) within each state
    per_state_top3 = counties.groupby(
        CensusVariables.state_name
        )[CensusVariables.census_population].nlargest(3)
    # sum those per state (level 0 of the MultiIndex is the state name)
    state_totals = per_state_top3.groupby(level=0).sum()
    return list(state_totals.nlargest(3).index)
answer_six()
```
### Question 7
Which county has had the largest absolute change in population within the period 2010-2015? (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015, you need to consider all six columns.)
e.g. If County Population in the 5 year period is 100, 120, 80, 105, 100, 130, then its largest change in the period would be |130-80| = 50.
*This function should return a single string value.*
```
def answer_seven():
    """Find county with largest absolute population variance

    Returns
    -------
    str: name of the county
    """
    estimates = counties[CensusVariables.population_estimates]
    # largest max-minus-min spread across the six estimate columns
    spread = estimates.max(axis=1) - estimates.min(axis=1)
    # idxmax() returns the row label; since `counties` has a reset 0..n
    # index this matches the original argmax/iloc behaviour, while staying
    # robust to modern pandas where argmax is positional.
    return counties.loc[spread.idxmax(), CensusVariables.county_name]
answer_seven()
```
### Question 8
In this datafile, the United States is broken up into four regions using the "REGION" column.
Create a query that finds the counties that belong to regions 1 or 2, whose name starts with 'Washington', and whose POPESTIMATE2015 was greater than their POPESTIMATE 2014.
*This function should return a 5x2 DataFrame with the columns = ['STNAME', 'CTYNAME'] and the same index ID as the census_df (sorted ascending by index).*
```
def answer_eight():
    """Find counties in region 1 or 2 that:

    * have names starting with 'Washington'
    * grew in population from 2014 to 2015

    .. note:: the index in the final data-frame has to match the original
    census data

    Returns
    -------
    DataFrame: with the state-name and county-name columns
    """
    in_regions = counties_original_index[
        counties_original_index[CensusVariables.region].isin([1, 2])]
    named_washington = in_regions[
        in_regions[CensusVariables.county_name].str.startswith("Washington")]
    grew = named_washington[
        named_washington[CensusVariables.population_2015] >
        named_washington[CensusVariables.population_2014]]
    return grew[[CensusVariables.state_name,
                 CensusVariables.county_name]]
# sanity check: the query should return exactly five counties with two columns
outcome = answer_eight()
assert outcome.shape == (5,2)
print(outcome)
```
| github_jupyter |
# Neural Networks: Back-propagation
Backprop is how the network evaluates its performance during feed forward. Combined with gradient descent, back-propagation helps train the network.
# Further reading
* [Peter's Notes](http://peterroelants.github.io/posts/neural_network_implementation_part01/) are a bit mathy and specific, but I've found them helpful when confused
* [Deep Learning Basics](http://alexminnaar.com/deep-learning-basics-neural-networks-backpropagation-and-stochastic-gradient-descent.html), a guide that covers about the same ground as this one
* [A Step by Step Backpropagation Example](https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/) using actual numbers
* [3Blue1Brown's calculus videos.](https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr) If you want to go deeper into calculus, these are good to get you motivated.
* [Again, 3Blue1Brown's neural network videos might be useful.](https://www.youtube.com/playlist?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi)
## Intuition
If the feed-forward is data pushed all the way forward to the outputs, then back-propagation is the trickling back down of errors from the outputs all the way back to the earliest neurons.
Back-propagation is necessary if you want to use gradient descent on neural networks of multiple layers. Single-layer models can calculate their errors in one step, but multi-layer networks require a multi-step process to get it done.

The back-propagation journey all starts at the output. Here there is a clear link between the choice of parameters (weights and biases) and the output error. The approach here is the same as simple gradient descent.
At the layer preceding the output, we'll call it $l_2$, there is an extra step. What is the link between $l_2$ weights and biases and the output error? It has multiple steps: $l_2$ has a direct effect on the output layer's data, and the output layer's data has a direct effect on what the model decides to output. It takes two steps to get back to the end.
In other words, the output layer is the boss and it is directly responsible for the model's error. If the output layer changes its behaviour, it can directly improve its accuracy. It's the easiest to train.
The hidden layers are not directly responsible for the model's error; however, they are responsible for providing the output layer accurate analyses of the model's input data. Knowing their boss, they have an idea of how to change their computations so that the big cheese makes more informed decisions. Their gradient formulas in fact depend on the output layer's weights (the boss's personality, you might say).
## Detour: gradient checking
Backprop takes snapshots of errors everywhere in the NN and uses these to adjust parameters. Normally this is done with calculus and repeated applications of the chain rule of differentiation.
Backprop can also be done by more primitive methods, albeit much more slowly. Numerical differentiation is used to teach students calculus, so it makes sense to show it here first before breaking out the chain rule.
The idea behind numerical differentiation is this:
1. Take your NN as is
2. Adjust a parameter slightly and see the effect on output error
3. You now know the effect of that parameter on error

Given $J(\theta_{i,j})$ your cost function, $\theta$ any parameter anywhere in the neural network, and $\epsilon$ a small value as a "nudge",
$$\frac{\partial J(\theta)}{\partial \theta_{i,j}} \approx \frac{J(\theta_{i,j} + \epsilon) - J(\theta_{i,j} - \epsilon)}{2\epsilon}$$
Backprop does this for every parameter in the NN. If it sees that the output error increases when a parameter is increased, it will decrease the parameter. If output error increases when the parameter is decreased, backprop increases the parameter instead.
(This is the gradient descent algorithm: it sees error and rolls down the slope in the opposite direction.)
Ultimately, this makes a neural network more complicated than any collection of corporate committees. Except in rare prophetic instances, an office worker will not know how many dollars their actions win/lose their company. With neural networks, a single neuron will know how much error it is causing its network. And yet, it's never guaranteed that the neuron can do something useful with this information!
It's a good idea to use gradient checking. It's a good backup.
## Back propagation II
I hope the above has made back propagation make some sense. It's now time for some light mathematics. Don't worry I'll just paste the answers and skip the algebraic Tetris.
Here are the gradients with respect to error for parameters in the NN model.
Glossary:
* An error gradient: the "slope" of the error. All the model needs to know is which direction this is.
* The Jacobian: a matrix full of gradients. Since all of our parameters are stored in matrices, it makes sense that we'd store all of our gradients in matrices too.
**Note 1:** With these equations, the most important part is whether they're positive or negative, so you can look at them to see what affects their sign. Gradient decsent will generally work alright as long as it's heading in the right direction (has the right sign).
**Note 2:** When a gradient is positive (error is increasing with parameter) you want to decrease the parameter. When the gradient is negative, you want to increase the parameter.
The gradients for the output layer weights and biases are
$$\frac{\partial J(\theta)}{\partial B_{out}} = 1 \cdot (a_{out} - y), \frac{\partial J(\theta)}{\partial W_{out}} = a_2^T \cdot (a_{out} - y)$$
The above equations make some sense. If the output neuron is overshooting the target, reduce the bias. It's a similar idea with the weights: if the weights cause the neuron to overshoot when they are given a positive input, they need to be reduced.
(You need the 1 in the bias gradient. It represents the intercept but is also necessary to get the right dimension.)
To proceed lower into the previous layer, we have to do some backprop. Here it is:
$$\delta_{out} = (a_{out} - y) \cdot W_{out}^T$$
We also need the derivative of the sigmoid function. We'll just call it $\sigma^\prime$.
We just have to include that in our equations and we'll be fine. The gradients for the second hidden layer are:
$$\frac{\partial J(\theta)}{\partial W_2} = a_1^T \cdot (\sigma^\prime(a_2) \circ \delta_{out}), \frac{\partial J(\theta)}{\partial B_2} = 1 \cdot \sigma^\prime(a_2) \circ \delta_{out}$$
The sigmoid derivative $\sigma^\prime$ is a newcomer, but otherwise these are similar to before. The weight gradients depend on layer 2's input, which comes from layer 1. The bias gradient is simpler, but it still has to pass through the $\sigma^\prime$ and the $\delta_{out}$.
For layer 1 we need a new delta.
$$\delta_2 = (\delta_{out} \circ \sigma^\prime(a_2)) \cdot W_2^T$$
Finally, the last backprop step.
$$\frac{\partial J(\theta)}{\partial W_1} = x^T \cdot (\sigma^\prime(a_1) \circ \delta_2), \frac{\partial J(\theta)}{\partial B_1} = 1 \cdot \sigma^\prime(a_1) \circ \delta_2$$
That's all there is to it.
## Gradient interpretation
In this section, I'll do my best to narrate what back propagation is doing. Feel free to skip this section.
Back propagation is a repeated application of the chain-rule of differentiation, and its purpose is to determine the effect of a parameter on model error, which is the error gradient with respect to the parameter ($W_1$, $B_{out}$, etc).
Let's take the weight update below as an example, starting with the $a_1^T$ term.
$$\frac{\partial J(\theta)}{\partial W_2} = a_1^T \cdot (\sigma^\prime(a_2) \circ \delta_{out})$$
Recall that $W_2$'s role in the neural network is to do the following:
$$a_2 = \sigma(a_1 W_2 + B_2), z_2 = a_1 W_2 + B_2$$
Let's put everything together by answering the question: *how does $W_2$ affect $z_2$?*
The answer: *$W_2$ affects $z_2$ through its interaction with $a_1$.*
The derivative $\frac{\partial z_2}{\partial W_2} = \frac{\partial a_1 W_2 + B_2}{\partial W_2} = a_1$ signifies that $z_2$ increases by $a_1$ when $W_2$ increases by 1. This works out nicely here since $a_1 W_2$ is linear; normally though the derivative "slope" only holds in a very small area around the current point.
This is where the $a_1^T$ in the gradient comes from. What does it mean? It means that $W_2$'s job is to multiply $a_1$, so its contribution to the model output is $a_1$. Since model error is closely related to model output (through the cost function), $a_1$ is also $W_2$'s contribution to model error.
That covers $a_1^T$.
Let's now look at that $\sigma^\prime(a_2)$ term.
$\sigma^\prime(a_2)$ is the sigmoid's contribution to error. $W_2$'s contribution to error, seen above, passes through the sigmoid prime. The derivative of the sigmoid function is $\sigma^\prime(a_2) = \sigma(a_2) (1 - \sigma(a_2))$. Looking at it a bit, it becomes apparent that the derivative reaches its maximum value when $\sigma(a_2) = 1 - \sigma(a_2)$, or when $\sigma(a_2) = 0.5$ or $a_2 = 0$. The derivative vanishes as $\sigma(a_2)$ approaches 0 or 1. Thus the sigmoid's contribution to error: it restricts the flow of error depending on the value of $a_2$ fed to it, $\sigma^\prime(a_2)$.
Story so far: $\frac{\partial J(\theta)}{\partial W_2}$ is $a_1$ passed through $\sigma^\prime$, the latter at most being 0.5 but possibly 0.0.
The next component of $W_2$'s gradient is $\delta_{out} = (a_{out} - y) \cdot W_{out}^T$.
We are seeing $W_{out}$ here because $W_2$'s effect must pass through it to reach the model error. The idea here is that stronger $W_{out}$ values mean that whatever $x W$ outputs will be magnified, while weaker $W_{out}$ will attenuate $W_2$'s influence. Therefore $W_{out}$ is a part of $W_2$'s effect on error.
$(a_{out} - y)$ is more difficult to explain because it is an algebraic simplification. Its full form is $\frac{\partial NLL_{cost} \sigma(a_{out})}{\partial a_{out}}$. But that isn't too important. The first role of $a_{out} - y$ is to keep the gradient positive if $a_{out} > y$ but turn it negative if $a_{out} < y$: this makes sense since you want to increase/lower $a_{out}$ if it has undershot/overshot y. The second role of $a_{out} - y$ is to return a higher error value the wider a gap there is between model output and true output: this gap resides in $[-1, 1]$ since all outputs belong in $[0, 1]$.
There you have it. I hope this has helped you understand gradients a little bit better.
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# AutoML 06: Custom CV Splits and Handling Sparse Data
In this example we use scikit-learn's [20newsgroup](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups.html) dataset to showcase how you can use AutoML to handle sparse data and how to specify custom cross-validation splits.
Make sure you have executed the [00.configuration](00.configuration.ipynb) before running this notebook.
In this notebook you will learn how to:
1. Create an `Experiment` in an existing `Workspace`.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model.
4. Explore the results.
5. Test the best fitted model.
In addition this notebook showcases the following features
- **Custom CV** splits
- Handling **sparse data** in the input
## Create an Experiment
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
import os
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
# Load the workspace saved by the 00.configuration notebook and create the
# experiment this notebook will submit runs to.
ws = Workspace.from_config()

# choose a name for the experiment
experiment_name = 'automl-local-missing-data'
# project folder
project_folder = './sample_projects/automl-local-missing-data'

experiment = Experiment(ws, experiment_name)

# Collect the configuration into a one-column frame for display.
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
# None (not the deprecated -1) disables column-width truncation in pandas
pd.set_option('display.max_colwidth', None)
pd.DataFrame(data=output, index=['']).T
```
## Diagnostics
Opt-in diagnostics for better experience, quality, and security of future releases.
```
# Opt in to sending telemetry so future SDK releases can improve on real usage.
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
```
## Creating Sparse Data
```
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.model_selection import train_test_split

# Strip metadata so the model must rely on the message text alone.
remove = ('headers', 'footers', 'quotes')
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]

data_train = fetch_20newsgroups(subset = 'train', categories = categories,
                                shuffle = True, random_state = 42,
                                remove = remove)

# Hold out a third of the training documents as a custom validation set.
X_train, X_validation, y_train, y_validation = train_test_split(
    data_train.data, data_train.target,
    test_size = 0.33, random_state = 42)

# Hashing the text into 2**16 features yields a sparse matrix.
vectorizer = HashingVectorizer(stop_words = 'english', alternate_sign = False,
                               n_features = 2**16)
X_train = vectorizer.transform(X_train)
X_validation = vectorizer.transform(X_validation)

# Small summary table: sample/feature counts for each split.
summary_df = pd.DataFrame(
    {'Train Set': [X_train.shape[0], X_train.shape[1]],
     'Validation Set': [X_validation.shape[0], X_validation.shape[1]]},
    index = ['No of Samples', 'No of Features'])
summary_df
```
## Configure AutoML
Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>balanced_accuracy</i><br><i>average_precision_score_weighted</i><br><i>precision_score_weighted</i>|
|**max_time_sec**|Time limit in seconds for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.<br>**Note:** If input data is sparse, you cannot use *True*.|
|**X**|(sparse) array-like, shape = [n_samples, n_features]|
|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|
|**X_valid**|(sparse) array-like, shape = [n_samples, n_features] for the custom validation set.|
|**y_valid**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification for the custom validation set.|
|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
```
# Local AutoML classification run over the sparse hashed features, scored by
# weighted AUC, with an explicit custom validation split (X_valid/y_valid).
automl_config = AutoMLConfig(task = 'classification',
                             debug_log = 'automl_errors.log',
                             primary_metric = 'AUC_weighted',
                             max_time_sec = 3600,
                             iterations = 5,
                             preprocess = False,  # must be False: input X is sparse
                             verbosity = logging.INFO,
                             X = X_train,
                             y = y_train,
                             X_valid = X_validation,
                             y_valid = y_validation,
                             path = project_folder)
```
## Train the Model
Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
In this example, we specify `show_output = True` to print currently running iterations to the console.
```
local_run = experiment.submit(automl_config, show_output=True)
```
## Explore the Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
# Jupyter widget that polls the run and renders a live table/graph of iterations.
from azureml.train.widgets import RunDetails
RunDetails(local_run).show()
```
#### Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
# Collect every child run's logged metrics into one table
# (rows = metric names, columns = iteration numbers).
children = list(local_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    # keep only scalar (float) metrics; lists/strings cannot go in the table
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics

# axis must be passed by keyword: positional axis for sort_index was
# deprecated in pandas 1.1 and removed in pandas 2.0
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
```
### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method on `local_run` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
```
best_run, fitted_model = local_run.get_output()
```
#### Best Model Based on Any Other Metric
Show the run and the model which has the best `accuracy` value:
```
# lookup_metric = "accuracy"
# best_run, fitted_model = local_run.get_output(metric = lookup_metric)
```
#### Model from a Specific Iteration
Show the run and the model from the third iteration:
```
# iteration = 3
# best_run, fitted_model = local_run.get_output(iteration = iteration)
```
### Register the Fitted Model for Deployment
```
# Register the best fitted model in the workspace's model registry.
description = 'AutoML Model'
tags = None
local_run.register_model(description = description, tags = tags)
local_run.model_id # Use this id to deploy the model as a web service in Azure.
```
### Testing the Fitted Model
```
# Load test data.
import sklearn
# pandas_ml is unmaintained and incompatible with pandas >= 0.25;
# use scikit-learn's confusion-matrix utilities instead.
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

remove = ('headers', 'footers', 'quotes')
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
data_test = fetch_20newsgroups(subset = 'test', categories = categories,
                               shuffle = True, random_state = 42,
                               remove = remove)

# Vectorize with the same hashing settings used for training so the
# feature space matches the fitted model.
vectorizer = HashingVectorizer(stop_words = 'english', alternate_sign = False,
                               n_features = 2**16)
X_test = vectorizer.transform(data_test.data)
y_test = data_test.target

# Test our best pipeline.
y_pred = fitted_model.predict(X_test)
y_pred_strings = [data_test.target_names[i] for i in y_pred]
y_test_strings = [data_test.target_names[i] for i in y_test]

# Print the confusion matrix as a labeled table, then plot it.
cm = confusion_matrix(y_test_strings, y_pred_strings,
                      labels = data_test.target_names)
print(pd.DataFrame(cm,
                   index = data_test.target_names,
                   columns = data_test.target_names))
ConfusionMatrixDisplay(cm, display_labels = data_test.target_names).plot()
```
| github_jupyter |
<table> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2> <font color="blue"> Solutions for </font> Matrices: Tensor Product </h2>
<a id="task1"></a>
<h3> Task 1 </h3>
Find $ u \otimes v $ and $ v \otimes u $ for the given vectors $ u = \myrvector{-2 \\ -1 \\ 0 \\ 1} $ and $ v = \myrvector{ 1 \\ 2 \\ 3 } $.
<h3>Solution</h3>
```
u = [-2,-1,0,1]
v = [1,2,3]

# Tensor (Kronecker) product of vectors: each entry of the first vector
# scales a full copy of the second vector.
uv = [ue * ve for ue in u for ve in v]
print("u-tensor-v is",uv)

# Reversing the order gives copies of u scaled by each entry of v.
vu = [ve * ue for ve in v for ue in u]
print("v-tensor-u is",vu)
```
<a id="task2"></a>
<h3> Task 2 </h3>
Find $ A \otimes B $ for the given matrices
$
A = \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2} ~~\mbox{and}~~
B = \mymatrix{rr}{0 & 2 \\ 3 & -1 \\ -1 & 1 }.
$
<h3>Solution</h3>
```
A = [
    [-1,0,1],
    [-2,-1,2]
]
B = [
    [0,2],
    [3,-1],
    [-1,1]
]

print("A =")
for row in A:
    print(row)

print() # print a line
print("B =")
for row in B:
    print(row)

# A-tensor-B is (6x6): the entry at row (3i+m), column (2j+n)
# equals A[i][j] * B[m][n], i.e. each A entry scales a whole copy of B.
AB = [
    [A[i][j] * B[m][n] for j in range(3) for n in range(2)]
    for i in range(2) for m in range(3)
]

print() # print a line
print("A-tensor-B =")
print() # print a line
for row in AB:
    print(row)
```
<a id="task3"></a>
<h3> Task 3 </h3>
Find $ B \otimes A $ for the given matrices
$
A = \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2} ~~\mbox{and}~~
B = \mymatrix{rr}{0 & 2 \\ 3 & -1 \\ -1 & 1 }.
$
<h3>Solution</h3>
```
A = [
    [-1,0,1],
    [-2,-1,2]
]
B = [
    [0,2],
    [3,-1],
    [-1,1]
]

print() # print a line
print("B =")
for row in B:
    print(row)
print("A =")
for row in A:
    print(row)

# B-tensor-A is (6x6): the entry at row (2i+m), column (3j+n)
# equals B[i][j] * A[m][n], i.e. each B entry scales a whole copy of A.
BA = [
    [B[i][j] * A[m][n] for j in range(2) for n in range(3)]
    for i in range(3) for m in range(2)
]

print() # print a line
print("B-tensor-A =")
print() # print a line
for row in BA:
    print(row)
```
| github_jupyter |

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/TellingTime/telling-time.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
```
# IPython display helpers plus this notebook's custom magics.
from IPython.display import HTML
from IPython.display import YouTubeVideo
import myMagics
# custom cell magic (defined in myMagics) that renders the notebook's UI buttons
%uiButtons
```
*Note: Run the cell above then click on the "Initialize" button to get notebook ready*
# Telling Time
Time is a concept we are all very familiar with. Seconds, minutes, hours, and days are simply part of our everyday life. Have you ever wondered about how all these units of time relate or why we use "am" and "pm" when talking about the hours of the day?
There are two important distinctions to make when thinking of time:
1. Telling the time - looking at a clock and knowing what time it is.
2. Measuring the time - using a clock or other tools to measure how long something takes.
In this notebook we will explore the relationships between all the different units of time and how to use different tools to read and keep track of time. We will also learn how to easily convert between hours, minutes and seconds.
## A Little History
When thinking of the different times of the day one thing stands out: there are two of each hour in the day; for example, 2 AM and 2 PM. Why is that?
This dates back to the Roman times. It was decided a day should be split up into two parts, one part for day time and the other for night time.
Eventually this split was made to use noon and midnight as the points where it changes from one part of the day to the other
- AM means "ante meridiem" which means "before midday"
- PM means "post meridiem" which means "after midday"
Click on the "More" button for more details
<div class="hideMe">
Initially to do this split the Romans decided to break the day up into two 12 hour blocks. As we can imagine from back in those days it would only be logical to make the 12 hours in the "day" start at sunrise and the 12 hours of the "night" start at dusk. But since the day/night cycle changes over the year (shorter days in winter for example), this caused problems.<br>
Eventually it was decided to change from sunset/dusk to midnight/midday, and this is where AM and PM were born. AM means "ante meridiem" which stands for "before midday" and PM means "post meridiem" meaning, you guessed it, "after midday". When you think about it this makes sense: 3PM is 3 hours past midday. Eventually it was decided that keeping one day split up into 24 hours instead of two blocks of 12 hours made more sense. The 24 hour clock was then introduced. The hours on this clock range from 0 to 23 (totalling 24 hours). We in North America still frequently use the AM/PM 12 hour day format but many parts of the world use the 24 hour clock.
```
%toggleMore
```
## How Well do you Know Time?
Ok so now that we have a little background on how our measurements of time came about, let's play a little game. Do you think you can guess exactly 15 seconds without using a clock? If you think you can do it, click the button below. When you think it’s been 15 seconds click it again and see how close you are. (Click again if you want to retry)
```
HTML(filename='TimeGuessWidget.html')
```
So how did you do? Not as easy as it seems, eh? Most people, when trying this for the first time, end up clicking much too early. You may have counted up to 15 "Mississippi"s in your head, which can help get closer to 15 seconds, but what if I asked you to guess 3 minutes? Rhythmically counting to 180 "Mississippi"s is not particularly fun. This example shows the importance of using tools to measure time more accurately.
## Reading Time
Long ago, before electricity was invented, ancient civilizations had to find other clever ways of reading time. Two common techniques used were:
1. Using the position of the sun to know the time of day
<img src="https://upload.wikimedia.org/wikipedia/commons/6/66/Sundial_-_Canonical_Hour.jpg" width="400" style="box-shadow: 4px 4px 12px gray;margin: 5px;">
2. Studying the position of the stars was used to know time in the night
<img src="https://c.tadst.com/gfx/750x500/tell-time-with-stars.png?1" width="400" style="box-shadow: 4px 4px 12px gray;margin: 5px;">
Nowadays the time is everywhere and is easily accessible. The two main ways of displaying time are digital and analog. A digital clock represents the time as numbers, while an analog clock represents the time using hands going around in a circle.
<div class="hideMe">
These days the time is everywhere. We simply look for a clock on a wall, an appliance, a watch, or our phones. This was not always the case though. Does this mean time did not exist long ago? Egyptians in 1500 BC (roughly 3500 years ago) were very much aware of time and found very clever ways of measuring it. For these ancient civilizations, as for us, knowing the time and months of the year was crucial to their survival. It turns out the most important clocks of all were not found on earth but in the sky! The stars and sun have been used to measure time for thousands of years and by many civilizations. The position of the sun casting shadows was used during the day, and the position of known constellations was used at night.
Luckily for us we have evolved far beyond using the sun and stars to tell time. Imagine trying to get to school on time on a cloudy day! Now when we get a new watch or clock we simply synchronize it to match another time piece showing the correct time. More and more, as devices are connected to the internet we don’t even have to set the time, it is done automatically!
```
%toggleMore
```
## Units of Time
So if I ask you what time it is right now you could easily look at a clock and tell me right? For example you could go as far as saying "it is 11:37:12 on Monday December 17, 2018". Now, that is probably a lot more information then was asked when asking for the time but bear with me. Let's break down all the components of that sentence:
- 1 year is made up of 365 days (366 if the year is leap)
- 1 day is made up of 24 hours
- 1 hour is made up of 60 minutes
- 1 minutes is made up of 60 seconds
- 1 second is made up of 1000 milliseconds
We could keep going but already 1 millisecond happens so fast that they are rarely used in everyday life.
Let's visualize this by using an analog clock. If you count all the ticks around the clock you will find out that there are 60 of them. This makes sense, as one hour is made up of 60 minutes and 1 minute is made up of 60 seconds. In everyday life we know a clock only ever goes forward, and some might say it moves relatively slowly. This can make it hard to fully understand its pattern. This example breaks these rules and allows you to manipulate the clock forward and backwards, fast or slow.
If you adjust the slider below the clock you will see that the hands will begin to move. Each tick on the slider represents a second. Try adjusting the time to see how each hand behaves. (You can also use your keyboard's side arrows to tick through 1 second at a time)
```
from IPython.display import HTML
HTML(filename='ClockWidget.html')
```
What have you noticed about the relationships between the hands as you slide back and forth?
Two important things to notice about the clock:
1. In order for the minute hand (blue) to move one full tick the seconds hand (red) must do one full rotation
2. When the minute hand does a full rotation the hour hand will have moved 5 ticks
Why does the hour hand move 5 ticks per minute rotation? That is because a day has 24 hours, not 60 hours. Remember earlier when we talked about AM and PM, the 24 hour day was broken down into two 12 hour sections. So we can see that if we divide a full rotation (60 minutes) into 12 hours we get $$60\div12=5$$ This means in the time the minute hand does a full rotation, meaning 60 minutes, the hour hand will advance 5 ticks on the clock. You will see this happening if you slide the slider from the far left all the way to the far right, the minute hand will have done a full rotation and the hour hand will have moved 5 ticks.
Now that we have a better understanding of the relationships between the units, can we figure out how many seconds are in 1 hour? Sure we can! Let's think about this. In 1 hour the minute hand passes 60 ticks (one per minute), and during each of those minutes the seconds hand goes all the way around once, passing 60 ticks of its own. This must mean $$60_\frac{min}{hr} \times60_\frac{sec}{min}=3600_\frac{sec}{hr}$$ So 1 hour has 3600 seconds. This means if you use your keyboard arrows on the slider from left to right you will need to push it 3600 times!! (Don't do that.)
Based on this math can you figure out how many seconds are in a day? or how many minutes are in a week?
## Measuring Time
So as we all know being able to tell the time is a crucial part of our everyday lives. It helps us know when we have appointments, when we should eat or when we should go to sleep.
Time also has many other great uses like keeping time for a hockey game or measuring how long it takes to drive from one city to another.
Let's take travelling from city to another as an example. Say you are going from Calgary to Edmonton and you want to calculate how long the trip takes. The simplest way of doing this without extra tools is to write down the time when you leave and then check the time when you arrive. Now all we do is take the difference between these two times.
Let's say you leave at 1:32 and arrive at 4:47. You can probably estimate in your head that the trip took a little over 3 hours, but we can calculate it exactly. To make this simpler we will convert the format from hours:minutes to just have minutes. Let's recall how many minutes are in 1 hour to get the following:
$$(1_{hr}\times60_\frac{min}{hr})+32_{min}=60_{min} + 32_{min}=92_{min}$$
$$(4_{hr}\times60_\frac{min}{hr})+47_{min}=240_{min} + 47_{min}=287_{min}$$
*Notice these times in minutes actually mean 92 min and 287 min past noon respectively*
And now we get the difference:
$$287_{min}-92_{min}=195_{min}$$
So the trip took 195 minutes to get from Calgary to Edmonton. To get it back into the hours:minutes format we need to figure out how many times $195$ can be divided by $60$. We can see that $60$ will fit $3$ times inside $195$ and we will be left with a remainder of $15$, so the trip took $3$ hours and $15$ minutes.
Ok so that wasn't too bad. It took some work to get an exact value but it is definitely doable. Now let's take our hockey example and look at how we could keep track of the time for the game. A few things to consider before attempting this
1. The time goes backwards from 20 minutes down to 0 milliseconds
2. The time has to stop every time the whistle is blown
3. The time has to be accurate to the 100th millisecond
Analyzing this problem we can quickly see that if all we have is a regular clock then a hockey game would take a very long time between each whistle blown as someone would have to calculate the differences in time between each stop and start. Thankfully we have many different types of tools to measure times like this. In this case a scoreboard with a timeclock built in does the trick.
Now all the time keeper has to do is stop and start time as the whistle is blown and the play starts again.
<div class="hideMe">
In other sports sometimes a fraction of a second makes the difference between first and second place. Precise measurements of time may also be needed during a critical science experiment. With examples like these we can see that a person's reflex to start/stop a clock is probably not going to cut it. Many other techniques have been developed to overcome these challenges. Laser sensors are far more accurate than human hand-to-eye coordination.
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/c/c0/LightBeamTiming.jpg/220px-LightBeamTiming.jpg" style="box-shadow: 4px 4px 12px gray;">
The image above is of a light beam sensor used on the Olympic track.
```
%toggleMore
```
## 24-Hour Clock
As we now know a day has 24 hours. We also know why AM and PM were introduced and are still used today. Another widely popular way of representing time is to use the 24-hour clock. The 24-hour clock eliminates the need to use AM and PM. Using the 24-hour clock simply means we don't go back to one after we pass noon, we keep going from 12 to 13. This may seem odd at first since saying it is 13 o'clock is not something we are used to.
One major benefit to using this format is that you will never set your alarm wrong by putting 8PM instead of 8AM; 8 just means 8 in the morning and 20 means 8 at night.
If you use this format enough, knowing that 16:03 simply means 4:03pm becomes second nature, but you're probably wondering how to quickly get this answer when you are not used to it yet.
All you have to do is take the hour $16$ and subtract $12$ from it so $$16-12=4\text{ o'clock PM.}$$ A good way to quickly do this in your head is to first take away $10$ which is easy to do then remove the last $2$, so $$16-10=6,$$ and then $$6-2=4\text{ o'clock.}$$ Give this a try: what time is 18:39? How about 22:18?
Many modern watches, smartphones, alarm clocks, etc. allow you to use the 24 hour clock. Try it out for a week or two and see how fast you adjust to this format.
## Different Ways to Express Time
Now that we have a much better understanding of how time works and the relationships between the different units we can start getting creative and come up with other ways to express time. Check out this video for an abstract wooden pendulum clock
```
YouTubeVideo('9ZzkMIrWdPE', width=800, height=550)
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/9ZzkMIrWdPE" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
Here is another abstract way of telling time. Can you decipher what each colour represents?
You can speed up the clock to see how each ring behaves over time, when you think you have figured it out check your answers below.
```
from IPython.display import HTML
HTML(filename='AbsClockWidget.html')
from IPython.display import HTML
HTML(filename='questions.html')
```
## Conclusion
In this notebook we explored:
1. Some history of time and where AM and PM come from
2. The relationships between the different units of time and how they behave together
3. Examples of tools to use the time in different ways (Time for a sports game)
4. How to use the 24-hour clock
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| github_jupyter |
```
import os
import sys
import math
import json
import torch
import numpy as np
import scipy.io
from scipy import ndimage
import matplotlib
# from skimage import io
# matplotlib.use("pgf")
matplotlib.rcParams.update({
# 'font.family': 'serif',
'font.size':10,
})
from matplotlib import pyplot as plt
import pytorch_lightning as pl
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import TensorBoardLogger
seed_everything(42)
import DiffNet
from DiffNet.DiffNetFEM import DiffNet2DFEM
from torch.utils import data
# from e1_stokes_base_resmin import Stokes2D
from pytorch_lightning.callbacks.base import Callback
from e2_ns_fps_resmin import OptimSwitchLBFGS, NS_FPS_Dataset, NS_FPS
def plot_contours(module, u, v, p, u_x_gp, v_y_gp, path=None):
    """Plot a 3x3 diagnostic panel for a trained NS_FPS module.

    Row 0: u, v and p contour fields.
    Row 1: element-wise divergence, velocity magnitude and streamlines.
    Row 2: mid-line profiles of u, v and p along x.

    :param module: NS_FPS lightning module; supplies ``domain_sizeY`` and
        the ``dataset`` grid coordinates (assumed shape (Ny, Nx) — confirm)
    :param u, v, p: 2D solution fields (CPU tensors/arrays)
    :param u_x_gp, v_y_gp: velocity gradients at Gauss points, used to form
        the divergence (a mass-conservation check)
    :param path: if given, the figure is saved to this file
    """
    self = module
    fig, axs = plt.subplots(3, 3, figsize=(6 * 3, 3 * 3),
                            subplot_kw={'aspect': 'auto'}, squeeze=True)
    # hide ticks on the two contour rows; the profile row keeps its axes
    for i in range(axs.shape[0] - 1):
        for j in range(axs.shape[1]):
            axs[i, j].set_xticks([])
            axs[i, j].set_yticks([])
    # divergence per element and its domain total
    div_gp = u_x_gp + v_y_gp
    div_elmwise = torch.sum(div_gp, 0)
    div_total = torch.sum(div_elmwise)
    interp_method = 'bilinear'
    im0 = axs[0, 0].imshow(u, cmap='jet', origin='lower', interpolation=interp_method)
    fig.colorbar(im0, ax=axs[0, 0]); axs[0, 0].set_title(r'$u_x$')
    im1 = axs[0, 1].imshow(v, cmap='jet', origin='lower', interpolation=interp_method)
    fig.colorbar(im1, ax=axs[0, 1]); axs[0, 1].set_title(r'$u_y$')
    im2 = axs[0, 2].imshow(p, cmap='jet', origin='lower', interpolation=interp_method)
    fig.colorbar(im2, ax=axs[0, 2]); axs[0, 2].set_title(r'$p$')
    im3 = axs[1, 0].imshow(div_elmwise, cmap='jet', origin='lower', interpolation=interp_method)
    fig.colorbar(im3, ax=axs[1, 0]); axs[1, 0].set_title(r'$\int(\nabla\cdot u) d\Omega = $' + '{:.3e}'.format(div_total.item()))
    im4 = axs[1, 1].imshow((u**2 + v**2)**0.5, cmap='jet', origin='lower', interpolation=interp_method)
    fig.colorbar(im4, ax=axs[1, 1]); axs[1, 1].set_title(r'$\sqrt{u_x^2+u_y^2}$')
    # streamlines need an explicit mesh matching the field resolution
    x = np.linspace(0, 1, u.shape[1])
    y = np.linspace(0, 1, u.shape[0])
    xx, yy = np.meshgrid(x, y)
    axs[1, 2].streamplot(xx, yy, u, v, color='k', cmap='jet')
    axs[1, 2].set_title("Streamlines")
    # profiles along the horizontal mid-line of the domain
    mid_idxY = int(self.domain_sizeY / 2)
    axs[2, 0].plot(self.dataset.x[mid_idxY, :], u[mid_idxY, :], label='u_mid')
    axs[2, 1].plot(self.dataset.x[mid_idxY, :], v[mid_idxY, :], label='v_mid')
    axs[2, 2].plot(self.dataset.x[mid_idxY, :], p[mid_idxY, :], label='p_mid')
    # fixed: was `if not path == None` — identity comparison is the idiom
    if path is not None:
        plt.savefig(path)
# ---- problem / training configuration ----
lx = 12.                        # domain length in x
ly = 6.                         # domain length in y
Nx = 128                        # grid resolution in x
Ny = 64                         # grid resolution in y
domain_size = 32                # NOTE(review): appears unused below; Nx/Ny drive the run — confirm
Re = 1.                         # Reynolds number
dir_string = "ns_fps"           # logger run name
max_epochs = 50001
plot_frequency = 100            # epochs between contour plots
LR = 5e-3                       # learning rate
opt_switch_epochs = max_epochs  # epoch at which OptimSwitchLBFGS switches optimizers
load_from_prev = False          # warm-start from a previously saved run
load_version_id = 25            # logger version to load when load_from_prev is True
# structured grid of node coordinates
x = np.linspace(0, lx, Nx)
y = np.linspace(0, ly, Ny)
xx , yy = np.meshgrid(x, y)
dataset = NS_FPS_Dataset(domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), Re=Re)
if load_from_prev:
    # resume: load the per-field parameter tensors saved by a previous run
    print("LOADING FROM PREVIOUS VERSION: ", load_version_id)
    case_dir = './ns_fps/version_'+str(load_version_id)
    net_u = torch.load(os.path.join(case_dir, 'net_u.pt'))
    net_v = torch.load(os.path.join(case_dir, 'net_v.pt'))
    net_p = torch.load(os.path.join(case_dir, 'net_p.pt'))
else:
    # cold start: the three fields (u, v, p) are zero-initialised on the grid
    print("INITIALIZING PARAMETERS TO ZERO")
    v1 = np.zeros_like(dataset.x)
    v2 = np.zeros_like(dataset.x)
    p = np.zeros_like(dataset.x)
    # stacked as (1, 3, *grid shape) — presumably (1, 3, Ny, Nx); confirm dataset.x layout
    u_tensor = np.expand_dims(np.array([v1,v2,p]),0)
    # each field is a directly-optimised parameter tensor (no neural network)
    net_u = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,0:1,:,:]), requires_grad=True)])
    net_v = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,1:2,:,:]), requires_grad=True)])
    net_p = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,2:3,:,:]), requires_grad=True)])
network = (net_u, net_v, net_p)
basecase = NS_FPS(network, dataset, domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), batch_size=1, fem_basis_deg=1, learning_rate=LR, plot_frequency=plot_frequency)
# ---- initialize trainer (TensorBoard + CSV loggers share one run dir) ----
logger = pl.loggers.TensorBoardLogger('.', name=dir_string)
csv_logger = pl.loggers.CSVLogger(logger.save_dir, name=logger.name, version=logger.version)
# NOTE(review): mode='max' with a *loss* monitor stops when the loss rises;
# 'min' is usually intended here — confirm this is deliberate
early_stopping = pl.callbacks.early_stopping.EarlyStopping('loss',
    min_delta=1e-8, patience=10, verbose=False, mode='max', strict=True)
checkpoint = pl.callbacks.model_checkpoint.ModelCheckpoint(monitor='loss',
    dirpath=logger.log_dir, filename='{epoch}-{step}',
    mode='min', save_last=True)
lbfgs_switch = OptimSwitchLBFGS(epochs=opt_switch_epochs)
trainer = Trainer(gpus=[0],callbacks=[early_stopping,lbfgs_switch],
    checkpoint_callback=checkpoint, logger=[logger,csv_logger],
    max_epochs=max_epochs, deterministic=True, profiler="simple")
# ---- training ----
trainer.fit(basecase)
# ---- persist the optimised parameter tensors for later querying ----
torch.save(basecase.net_u, os.path.join(logger.log_dir, 'net_u.pt'))
torch.save(basecase.net_v, os.path.join(logger.log_dir, 'net_v.pt'))
torch.save(basecase.net_p, os.path.join(logger.log_dir, 'net_p.pt'))
# ---- query: evaluate the trained fields on the dataset sample ----
basecase.dataset[0]
inputs, forcing = basecase.dataset[0]
u, v, p, u_x, v_y = basecase.do_query(inputs, forcing)
# detach to plain CPU tensors for plotting
u = u.squeeze().detach().cpu()
v = v.squeeze().detach().cpu()
p = p.squeeze().detach().cpu()
u_x = u_x.squeeze().detach().cpu()
v_y = v_y.squeeze().detach().cpu()
# plot
plot_contours(basecase, u, v, p, u_x, v_y)
# ---- standalone query of a previously trained run (no re-training) ----
version_id = 81
case_dir = './ns_fps/version_'+str(version_id)
dataset = NS_FPS_Dataset(domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), Re=Re)
net_u = torch.load(os.path.join(case_dir, 'net_u.pt'))
net_v = torch.load(os.path.join(case_dir, 'net_v.pt'))
net_p = torch.load(os.path.join(case_dir, 'net_p.pt'))
# move the loaded parameter tensors to CPU before wrapping them
network = (net_u.cpu(), net_v.cpu(), net_p.cpu())
equation = NS_FPS(network, dataset, domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), batch_size=1, fem_basis_deg=1, learning_rate=LR, plot_frequency=plot_frequency)
# query: evaluate the loaded fields and detach for plotting
inputs, forcing = equation.dataset[0]
u, v, p, u_x, v_y = equation.do_query(inputs, forcing)
u = u.squeeze().detach().cpu()
v = v.squeeze().detach().cpu()
p = p.squeeze().detach().cpu()
u_x = u_x.squeeze().detach().cpu()
v_y = v_y.squeeze().detach().cpu()
# blank out the solid obstacle region (set to inf) so it plots as a hole
obj_left_idx = dataset.obj_left_idx
obj_rght_idx = dataset.obj_rght_idx
obj_bttm_idx = dataset.obj_bttm_idx
obj_top__idx = dataset.obj_top__idx
u[obj_bttm_idx:obj_top__idx, obj_left_idx:obj_rght_idx] = float('inf')
v[obj_bttm_idx:obj_top__idx, obj_left_idx:obj_rght_idx] = float('inf')
p[obj_bttm_idx:obj_top__idx, obj_left_idx:obj_rght_idx] = float('inf')
# plot and save next to the loaded checkpoint
filepath = os.path.join(case_dir,'query_ns_fps.png')
plot_contours(equation, u, v, p, u_x, v_y, filepath)
net_u.cpu()
net_u
# ---- compare against reference numerical results along the horizontal mid-line ----
simdata = np.loadtxt('ns-ldc-numerical-results/re-30-ns-L12-H6-midlineX.csv', skiprows=1,delimiter=',')
fig, axs = plt.subplots(3, 3, figsize=(6*3,3.6*3), subplot_kw={'aspect': 'auto'}, squeeze=True)
axs[0,0].plot(simdata[:,0], simdata[:,2],label='num')
axs[0,1].plot(simdata[:,0], simdata[:,3],label='num')
axs[0,2].plot(simdata[:,0], simdata[:,1],label='num')
mid_idxX = int(Nx/2)
mid_idxY = int(Ny/2)
axs[0,0].plot(equation.dataset.x[mid_idxY,:], u[mid_idxY,:],label='u_mid'); axs[0,0].legend()
axs[0,1].plot(equation.dataset.x[mid_idxY,:], v[mid_idxY,:],label='v_mid'); axs[0,1].legend()
axs[0,2].plot(equation.dataset.x[mid_idxY,:], p[mid_idxY,:],label='p_mid'); axs[0,2].legend()
# ---- same comparison along the vertical mid-line ----
simdataY = np.loadtxt('ns-ldc-numerical-results/re-30-ns-L12-H6-midlineY.csv', skiprows=1,delimiter=',')
fig, axs = plt.subplots(3, 3, figsize=(6*3,3.6*3), subplot_kw={'aspect': 'auto'}, squeeze=True)
axs[0,0].plot(simdataY[:,0], simdataY[:,2],label='num')
axs[0,1].plot(simdataY[:,0], simdataY[:,3],label='num')
axs[0,2].plot(simdataY[:,0], simdataY[:,1],label='num')
mid_idxX = int(Nx/2)
mid_idxY = int(Ny/2)
axs[0,0].plot(equation.dataset.y[:,mid_idxY], u[:,mid_idxY],label='u_mid'); axs[0,0].legend()
axs[0,1].plot(equation.dataset.y[:,mid_idxY], v[:,mid_idxY],label='v_mid'); axs[0,1].legend()
axs[0,2].plot(equation.dataset.y[:,mid_idxY], p[:,mid_idxY],label='p_mid'); axs[0,2].legend()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 10)
import warnings
import logging
import os
import onnxruntime
from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper
# from azureml.automl.core.onnx_convert import OnnxInferenceHelper
import json
import time
from typing import Any, Tuple
from numpy import ndarray
class OnnxModelWrapper:
    """Thin prediction facade over an AutoML ONNX model.

    Wraps :class:`OnnxInferenceHelper` so the object exposes the familiar
    sklearn-like ``predict`` / ``predict_proba`` surface (e.g. for use with
    model explainers).
    """

    def __init__(self, onnx_model_bytes: bytes, onnx_input_map: dict):
        """
        :param onnx_model_bytes: the serialized ONNX model
        :param onnx_input_map: the onnx_resource dictionary describing inputs
        """
        self.onnx_model_bytes = onnx_model_bytes
        self.onnx_input_map = onnx_input_map
        self.wrapper_model = OnnxInferenceHelper(onnx_model_bytes, onnx_input_map)

    def predict(self, X) -> Tuple[Any, Any]:
        """Run inference on ``X``; returns the helper's (label, prob) tuple."""
        return self.wrapper_model.predict(X)

    def predict_proba(self, X) -> ndarray:
        """Run inference on ``X`` and return only the class-probability array."""
        labels_and_probs = self.wrapper_model.predict(X, with_prob=True)
        return labels_and_probs[1]
# Load the serialized ONNX model and its resource map from disk.
# NOTE(review): these handles are never closed — `with open(...)` would be safer
onnx_model_file = open('onnx.model', 'rb')
onnx_res_file = open('onnx.res', 'r')
onnx_model_data = onnx_model_file.read()
onnx_res_data = onnx_res_file.read()
onnxrt_wrapper = OnnxModelWrapper(onnx_model_data, json.loads(onnx_res_data))
# score the invoice dataset and time one full predict_proba pass
filepath = 'invoice.csv'
data_df = pd.read_csv(filepath)
data_df.head()
data_df.shape
start = time.time()
predictions = onnxrt_wrapper.predict_proba(data_df)
end = time.time()
print(end - start)
# build a LightGBM surrogate (mimic) explainer around the ONNX model;
# augment_data=False keeps the initialization sample as-is
from azureml.explain.model.mimic.mimic_explainer import MimicExplainer
from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
start = time.time()
explainer = MimicExplainer(onnxrt_wrapper, data_df, LGBMExplainableModel, augment_data=False)
end = time.time()
print(end - start)
def explain(train_data):
    # global feature importances only; per-row (local) explanations skipped
    explanation = explainer.explain_global(train_data, include_local=False)
# time the explanation at increasing sample sizes to see how runtime scales
sample = data_df.sample(1000)
start = time.time()
explain(sample)
end = time.time()
print(end - start)
sample = data_df.sample(2000)
start = time.time()
explain(sample)
end = time.time()
print(end - start)
sample = data_df.sample(5000)
start = time.time()
explain(sample)
end = time.time()
print(end - start)
# finally, explain on the full dataset
start = time.time()
explain(data_df)
end = time.time()
print(end - start)
data_df.shape
```
| github_jupyter |
```
from random import randint
from timeit import default_timer
# Benchmark fixtures: two random size x size integer matrices (entries in
# [0, size]) and a zero-filled accumulator for the multiplication result.
size = 100
_dims = range(size)
mat_1 = [[randint(0, size) for _col in _dims] for _row in _dims]
mat_2 = [[randint(0, size) for _col in _dims] for _row in _dims]
result = [[0] * size for _row in _dims]
```
### 1. Serial Implementation
```
# Naive triple-loop matrix multiply: result = mat_1 @ mat_2, O(size^3)
starttime = default_timer()
for i in range(size):
    for j in range(size):
        for k in range(size):
            result[i][j] += mat_1[i][k] * mat_2[k][j]
t1 = default_timer() - starttime
print("Serial Time Taken :", t1)
# reset the accumulator so later implementations start from zeros
result = [[0 for _ in range(size)] for _ in range(size)]
```
### 2. Data Parallel Implementation
```
!pip install -q pymp-pypi
import pymp
# Row-parallel multiply with pymp (OpenMP-style fork/join for Python).
starttime = default_timer()
# FIX: was dtype='uint8' — each result entry is a sum of `size` products of
# values up to size*size (~1e6 here), far beyond 255, so the shared array
# silently overflowed and the "parallel" result differed from the serial one.
res_arr = pymp.shared.array((size, size), dtype='int64')
with pymp.Parallel(2) as p:
    # p.range splits the row indices across the 2 worker processes
    for i in p.range(size):
        for j in range(size):
            for k in range(size):
                res_arr[i][j] += mat_1[i][k] * mat_2[k][j]
t2 = default_timer() - starttime
print("Parallel Time Taken :", t2)
```
### 3. Numpy
```
import numpy as np
# NumPy: a single BLAS-backed dot product
starttime = default_timer()
res = np.dot(mat_1,mat_2)
t3 = default_timer() - starttime
print("Numpy Time Taken :", t3)
```
### 4. Scipy
```
from scipy import sparse
# Sparse (CSR) matrix product via SciPy.
m1 = sparse.csr_matrix(mat_1)
m2 = sparse.csr_matrix(mat_2)
starttime = default_timer()
# FIX: csr_matrix.multiply() is the *element-wise* (Hadamard) product, so the
# original timing did not measure matrix multiplication like every other
# section of this benchmark; `@` is the true matrix product.
res = m1 @ m2
t4 = default_timer() - starttime
print("Scipy Time Taken :", t4)
```
### 5. Pandas
```
import numpy as np
import pandas as pd
# Pandas: DataFrame.dot (delegates to NumPy; adds index-alignment overhead)
df_1 = pd.DataFrame(mat_1)
df_2 = pd.DataFrame(mat_2)
starttime = default_timer()
df_1.dot(df_2)
t5 = default_timer() - starttime
print("Pandas Time Taken :", t5)
```
### 6. Sympy
```
from sympy import Matrix
# SymPy: exact symbolic matrices — correct but far slower than numeric libs
m1 = Matrix(mat_1)
m2 = Matrix(mat_2)
starttime = default_timer()
r = m1*m2
t6 = default_timer() - starttime
print("Sympy Time Taken :", t6)
```
### 7. Numba
```
import numpy as np
import timeit
from numba import jit, float64, prange
@jit('float64[:,:](float64[:,:],float64[:,:])', parallel=True, nopython=True)
def matmul(A, B):
    """Numba-compiled dense matrix product C = A @ B with parallel outer loops."""
    C = np.zeros((A.shape[0], B.shape[1]))
    for i in prange(A.shape[0]):
        for j in prange(B.shape[1]):
            # FIX: the reduction runs over A's *columns* (== B's rows), i.e.
            # A.shape[1]; the original used A.shape[0], which only happened to
            # work because the benchmark matrices are square.
            for k in range(A.shape[1]):
                C[i,j] = C[i,j] + A[i,k]*B[k,j]
    return C
# time the jitted multiply on float matrices
# NOTE(review): the first call includes JIT compilation time — warm up first
# for a fair comparison
A = np.random.rand(size, size)
B = np.random.rand(size, size)
start = default_timer()
matmul(A, B)
t7 = default_timer() - start
print("Numba Time Taken :", t7)
```
### 8. Linalg
```
from numpy.linalg import multi_dot
# numpy.linalg.multi_dot optimises the parenthesisation of a chain of
# products; with only two matrices it is equivalent to np.dot
start = default_timer()
_ = multi_dot([mat_1, mat_2])
t8 = default_timer() - start
print("linalg Time Taken :", t8)
```
### 9. Pymatrix
```
!wget https://raw.githubusercontent.com/dthul/pymatrix/master/matrix.py
import sys
sys.path.append('/content/')
from matrix import Matrix
# pymatrix: pure-Python Matrix class (note: this Matrix shadows sympy's)
m1 = Matrix(mat_1)
m2 = Matrix(mat_2)
start = default_timer()
res = m1 * m2
t9 = default_timer() - start
print("pymatrix Time Taken :", t9)
```
### 10. Tensorflow
```
from tensorflow.linalg import matmul
# TensorFlow matmul (this `matmul` import shadows the numba one above);
# the first call includes tensor-conversion/op setup overhead
start = default_timer()
_ = matmul(mat_1, mat_2)
t10 = default_timer() - start
print("tensorflow Time Taken :", t10)
```
### 12. Pymc
```
!pip install -q pymc3
import pymc3 as pm
# pymc3 (Theano backend): pm.math.dot builds a symbolic graph node, so this
# mostly times graph construction rather than the numeric multiply
start = default_timer()
res = pm.math.dot(mat_1, mat_2)
t12 = default_timer() - start
print("pymc Time Taken :", t12)
import numpy as np
from functools import lru_cache, wraps
import numpy as np
def np_cache(*args, **kwargs):
    """LRU cache for functions whose FIRST parameter is a numpy array.

    The array is converted to a hashable nested tuple to form the cache key
    and rebuilt with ``np.array`` before the wrapped function actually runs.
    Extra ``*args``/``**kwargs`` are forwarded to :func:`functools.lru_cache`.

    >>> array = np.array([[1, 2, 3], [4, 5, 6]])
    >>> @np_cache(maxsize=256)
    ... def multiply(array, factor):
    ...     print("Calculating...")
    ...     return factor*array
    >>> multiply(array, 2)
    Calculating...
    array([[ 2,  4,  6],
           [ 8, 10, 12]])
    >>> multiply(array, 2)
    array([[ 2,  4,  6],
           [ 8, 10, 12]])
    >>> multiply.cache_info()
    CacheInfo(hits=1, misses=1, maxsize=256, currsize=1)
    """
    def decorator(function):
        def to_hashable(obj):
            # recurse into any iterable; scalars raise TypeError and are
            # returned as-is, terminating the recursion
            try:
                return tuple(to_hashable(item) for item in obj)
            except TypeError:
                return obj

        @lru_cache(*args, **kwargs)
        def cached_call(key, *call_args, **call_kwargs):
            # rebuild the array from the tuple key before delegating
            return function(np.array(key), *call_args, **call_kwargs)

        @wraps(function)
        def wrapper(np_array, *call_args, **call_kwargs):
            return cached_call(to_hashable(np_array), *call_args, **call_kwargs)

        # expose the cache controls on the public wrapper
        wrapper.cache_info = cached_call.cache_info
        wrapper.cache_clear = cached_call.cache_clear
        return wrapper
    return decorator
@np_cache(maxsize=256)
def sq_cache(array):
    # element-wise square; repeated calls with an equal array hit the cache
    return array*array

starttime = default_timer()
l1 = np.array(mat_1)
sq_cache(l1)
t13 = default_timer() - starttime
# NOTE(review): this times element-wise squaring (plus tuple-hashing overhead
# from np_cache), not matrix multiplication, so t13 is not directly
# comparable to t1..t12
print("Custom Time Taken :", t13)
import matplotlib.pyplot as plt
# Bar chart of wall-clock times for the slower (pure-Python / symbolic)
# methods; log scale because the timings span several orders of magnitude.
fig, ax = plt.subplots()
# full method list kept for reference:
# methods = ['Serial','Parallel','Numpy','Scipy','Pandas','Sympy','Numba','Linalg','Pymatrix','TF','Pymc','Custom']
# times = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t12, t13]
methods = ['Serial','Parallel','Sympy','Pymatrix','TF','Pymc','Custom']
times = [t1, t2, t6, t9, t10, t12, t13]
ax.bar(methods,times)
ax.set_ylabel('Time in Seconds')
ax.set_title(f'Speedup Matrix Multiplication ({size}*{size})')
fig.set_size_inches(10, 3)
plt.yscale('log')
plt.show()
```
| github_jupyter |
```
#%%appyter init
from appyter import magic
magic.init(lambda _=globals: _())
```
### Load all necessary packages
```
import rpy2
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from rpy2.robjects import numpy2ri, pandas2ri
import rpy2.ipython.html
rpy2.ipython.html.init_printing()
import rpy2.robjects.lib.ggplot2 as gp
from rpy2.ipython.ggplot import image_png
from rpy2.rinterface_lib import openrlib
# make the locally-installed MetENP R library visible to the embedded R session
robjects.r('''
## add the full path to folder inside which MetENP R package is installed; relative path may not work
.libPaths( c( .libPaths(), "/home/mano/.local/R" ))
##.libPaths( c( .libPaths(), "/var/www/html/appyters/MetENP/R" ))
print(.libPaths())
''')
;
%%appyter hide_code
import pandas as pd
from io import StringIO
# R helper that flattens a data.frame of list-columns and renders it as TSV.
# NOTE(review): `format_tsv` belongs to the readr package — confirm it is
# attached in the R session; install.packages("dplyr") here also requires
# network access at import time.
_formatter = robjects.r('''
install.packages("dplyr")
function (X) {
return (
format_tsv(
as.data.frame(apply(X, c(1,2), function(x) {
unlist(x, TRUE, TRUE)
}))
)
)
}
''')
def as_df(data, index_col=None):
    # convert an R data.frame (via _formatter's TSV text) into a pandas DataFrame
    return pd.read_csv(StringIO(_formatter(data)[0]), sep='\t', index_col=index_col)
from rpy2.robjects.packages import importr
metenp = importr('MetENP')
%%appyter hide_code
{% do SectionField(
name='INPUT',
title='Input the Study, Factors and Analysis type Information',
subtitle='Specify your study here',
img='upload_icon.png',
)%}
{% do SectionField(
name='SIGM',
title='Find Significant Metabolites',
subtitle='Run significance of all the analysis summary together',
img='vol.png',
)%}
{% do SectionField(
name='COUNTCLASS',
title='Count Metabolites by Class',
subtitle='Count the metabolites of each class',
img='bar.png',
)%}
{% do SectionField(
name='ENRICH',
title='Enrichment Score by Class',
subtitle='Calculate the enrichment score of each metabolite class',
img='enrich.png',
)%}
{% do SectionField(
name='HEAT',
title='Heatmap',
subtitle='Heatmap of metabolites x enriched pathways',
img='heat.png',
)%}
{% do SectionField(
name='DOT',
title='Dotplot',
subtitle='Plot a dotplot of enrichment by class',
img='dot.png',
)%}
%%appyter code_exec
{% set study = MetabolomicsStudyField(
name='study',
label='Study Information',
description = "Resolve the factors and analysis type for the study",
default={'study_id': 'ST000915', 'fac1': 'Cirrhosis', 'fac2': 'Normal', 'anal': 'Core G Fatty acids/Eicosanoids', 'faccol': 'Diagnosis' },
section='INPUT',
)
%}
print("Study ID:", {{study.value.study_id|jsonify}})
%%appyter hide_code
{% set studyOrg = StringField(name='studyOrg', label='KEGG Organism Code', description = "hsa for human, mmu for mouse, eco for E. coli, rno for rat. See https://www.genome.jp/kegg/catalog/org_list.html for all organisms", default='hsa', section='INPUT') %}
%%appyter hide_code
{% set classtype = MultiChoiceField(
name='classtype',
label='Choose a Metabolite Class',
description='Choose between sub_class, main_class and super_class',
default=[
'sub_class',
],
choices=[
'sub_class',
'main_class',
'super_class',
],
section='INPUT'
) %}
```
### Start data processing
### Gets metabolomics data, metadata and metabolite info from Metabolomics Workbench using REST service
```
%%appyter code_exec
data_1 = robjects.r['getmwstudies']({{study.value.study_id|jsonify}},
'data'
)
%%appyter hide_code
data_1 = metenp.getmwstudies({{study.value.study_id|jsonify}}, 'data')
as_df(data_1, index_col=[0, 1, 2, 3, 4])
# hold the R interpreter lock while mutating shared embedded-R state
with openrlib.rlock:
    robjects.r.assign('data_1', data_1)
    # flatten list-columns so the data.frame previews cleanly
    robjects.r('''
data_1_print = data.frame(apply(data_1, c(1,2), function(x){unlist(x, TRUE, TRUE)}))
head(data_1_print)
''')
    pass
```
### Gets metabolomics data, metadata and metabolite info from Metabolomics Workbench using REST service
```
%%appyter code_exec
metadata = robjects.r['getmwstudies']({{study.value.study_id|jsonify}},
'factors'
)
robjects.r.assign('metadata', metadata)
robjects.r('''
metadata_print = data.frame(apply(metadata, c(1,2), function(x){unlist(x, TRUE, TRUE)}))
head(metadata_print)
''')
```
### Associate metabolomics data to the refmet class
```
# map each metabolite to its RefMet standardized class annotations
refmet_class = robjects.r['convert_refmet'](data_1)
robjects.r.assign('refmet_class', refmet_class)
# flatten list-columns for display
robjects.r('''
refmet_class_print = data.frame(apply(refmet_class, c(1,2), function(x){unlist(x, TRUE, TRUE)}))
head(refmet_class_print)
''')
```
### This lists each unique metadata factor/independent variable
```
# unique experimental factors (independent variables) present in the metadata
robjects.r('''
study_facs = data.frame(unique(metadata_print$factors))
''')
```
### This lists each different analysis terms
```
# unique analysis summaries (assay modes) available in this study
robjects.r('''
study_anals = data.frame(unique(data_1_print$analysis_summary))
''')
```
### Here, find significant metabolites and the significance of all the analysis summary together. The analysis summary/modes you got in the previous section.
```
%%appyter hide_code
%%appyter hide_code
{% set normtype = MultiChoiceField(
name='normtype',
label='Select Type of Normalization. How would you like to handle missing data?',
description='Select 1. half_of_min: where the NAs are replaced by half of min values in the data, 2. remove_NAs: where Cols with NAs values are removed, or 3. 50percent: where cols with more than 50% NAs values are removed. half_of_min is ideal when you wish to see which metabolites were present in either group. Very high fold change would mean it was present in either group.',
default=[
'50percent',
],
choices=[
'half_of_min',
'remove_NAs',
'50percent',
],
section='SIGM'
) %}
%%appyter hide_code
{% set pthres = MultiChoiceField(
name='pthres',
label='P-Value Threshhold',
description='P-Value Threshhold for Volcano Plot. Float between 0 and 1.',
default=[
'0.05',
],
choices=[
'0.001',
'0.002',
'0.005',
'0.01',
'0.02',
'0.05',
'0.10'
],
section='SIGM'
) %}
%%appyter hide_code
#0.0, 0.5, 0.6, 0.8, 1, 1.2, 1.5, 2, 2.5, 3
#TODO - don't do yet
{% set thres_log2foldchange = StringField(
name = 'tres_log2foldchange',
label = 'Log2FoldChange Threshhold',
description= "Log2FoldChange Threshhold for Volcano Plot.",
default='0.5',
section='SIGM'
) %}
%%appyter hide_code
{% set padj = MultiChoiceField(
name='padj',
label='Select P Adjust Method',
description='P adjust method for significance analysis of metabolites',
default=[
'fdr',
],
choices=[
'fdr',
'BH',
'holm',
'bonferroni',
'hochberg',
'hommel',
],
section='SIGM'
) %}
%%appyter code_eval
robjects.r.assign('refmet_class', refmet_class)
robjects.r.assign('metcol', 'metabolite_name')
robjects.r.assign('fac1', {{study.value.fac1|jsonify}})
robjects.r.assign('fac2', {{study.value.fac2|jsonify}})
robjects.r.assign('faccol', {{study.value.faccol|jsonify}})
robjects.r.assign('samcol', 'local_sample_id')
robjects.r.assign('normtype', {{normtype}})
robjects.r.assign('pthres', {{pthres}})
robjects.r.assign('thres_log2foldchangevar', {{thres_log2foldchange}})
robjects.r.assign('padj', {{padj}})
robjects.r('''
ptreshnum<-as.numeric(pthres)
thres_log2foldchangenum<-as.numeric(thres_log2foldchangevar)
padjchar<-as.character(padj[1])
''')
;
%%appyter code_exec
robjects.r.assign('anal',{{study.value.anal|jsonify}} )
```
### Find the log2fold change and p value on metabolomics data using a t-test.
```
robjects.r('''
analysis_type_sep = "___";
anal_vec = trimws(unlist(strsplit(anal, analysis_type_sep)))
print(anal_vec)
stats_metabolites = significant_met(metabolomics_data=refmet_class_print, met_col='metabolite_name',analysis_type=anal_vec, metadata=metadata, factor1=fac1, factor2=fac2, factor_col=faccol,sample_col='local_sample_id', p_adjust=padjchar,normalization=normtype)
''')
;
robjects.r('''
sig_metabolites = stats_metabolites[which(stats_metabolites[,"pval"] <= ptreshnum&abs(stats_metabolites[,"log2Fold_change"])>thres_log2foldchangenum),]
''')
;
image_png(robjects.r('''
plot_volcano(stats_metabolites, thres_pval= ptreshnum, thres_log2foldchange = thres_log2foldchangenum, TRUE)
'''))
```
Each color describes a different metabolite group.
### Map metabolite class of the significant metabolites utilizing refmet classification in Metabolomics Workbench
This function not only maps metabolite to metabolite class but also to external databases such as pubchem, inchi key, smiles, and KEGG (if you chose to add in KEGG).
```
with openrlib.rlock:
robjects.r('''
sig_metabolites_kegg_id= map_keggid(sig_metabolites)
''')
pass
```
#### Check that none of your significant metabolites are missing a metabolite class assignment
Outcome should be 0 elements.
```
robjects.r('''
setdiff(sig_metabolites$refmet_name, sig_metabolites_kegg_id$refmet_name)
''')
```
### Count the number of metabolites in each of the metabolite classes and plot them with a bar graph
```
%%appyter hide_code
{% set thres_log2foldchange_countclass = StringField(
name = 'thres_log2foldchange_countclass',
label = 'Log2FoldChange Threshhold for counting',
description= "Log2FoldChange Threshhold for counting a metabolite in a class. Float between 0 and 1.",
default='0.5',
section='COUNTCLASS'
) %}
%%appyter code_exec
robjects.r.assign('classtype', {{classtype}})
robjects.r.assign('thres_log2foldchange_countclass', {{thres_log2foldchange_countclass}})
robjects.r('''
metclassvar <- as.character(classtype[1])
count_changes = metcountplot(df_metclass=sig_metabolites_kegg_id, metclass=metclassvar, plotting=TRUE, thres_logfC = thres_log2foldchange_countclass)
''')
image_png(robjects.r('''
count_changes$plotimg
'''))
```
### Enrichment score (p-value) for metabolite classes
Calculate the enrichment score (p-value) of each metabolite class.
Enrichment score (p-value) is calculated through the hypergeometric method as described by Choudhary et al. [https://www.biorxiv.org/content/10.1101/2020.11.20.391912v1].
```
%%appyter hide_code
{% set sigmets = IntField(
name = 'sigmets',
label = 'Minimum # of (most significant) metabolites needed in a class to include the class in enrichment.',
description= "We advise to use the number of metabolites in each class as 3 or more. However, if you want to know the enrichment score for all the metabolites, choose 1.",
default=3,
section = 'ENRICH'
) %}
%%appyter code_eval
robjects.r.assign('sigmets', {{sigmets}})
robjects.r('''
metenrichment = metclassenrichment(df_metclass=sig_metabolites_kegg_id,refmet_class, metclass=metclassvar,enrich_stats="HG",no=sigmets)
''')
;
```
## Plot the enrichment score (p-value) via function plot_met_enrichment
### If no figure shows here, or an error returns, this means no classes had enough significant metabolites. Decrease the minimum number of significant metabolites per class needed and reload/rerun the appyter program.
```
image_png(robjects.r('''
plot_met_enrichment(metenrichment, metclassvar,"HG", no=sigmets)
'''))
```
### Check the pathways with reactions involving the significant metabolites
```
%%appyter code_exec
robjects.r.assign('studyOrg', {{studyOrg}})
robjects.r('''
met_path = met_pathways(df_metenrichment = metenrichment, studyOrg)
head(met_path)
''')
```
### Get pathway enrichment score (p-value).
Once we have the pathway information, we can calculate enrichment score (p-value) of pathways using the hypergeometric scores for each pathway.
For hypergeometric score, we need to know the total number of KEGG compounds that are linked to KEGG pathways.
N = Total no. of compounds linked to KEGG pathway,
L = No. of compounds in a pathway,
M = No. of altered (significant) compounds in a pathway,
K = Total no. of altered (significant) compounds.
Then,
score or p-value = phyper(M-1, L, N-L, K)
This function also utilizes korg dataset from pathview package.
```
%%appyter hide_code
{% set keggTF = MultiChoiceField(
name='keggTF',
label='Analysis with KEGG',
description='Would you like the enrichment analysis to include all KEGG pathways?',
default=[
'FALSE',
],
choices=[
'TRUE',
'FALSE'
],
section='ENRICH'
) %}
%%appyter code_exec
robjects.r.assign('keggTF', {{keggTF}})
robjects.r('''
keggTFchar<-as.character(keggTF[1])
''')
%%appyter code_exec
# relative paths do not work, exclude the starting / as it gets added automatically when appyter runs
#korgvar = "{{ url_for('home/sumana/appyters/MetENP/static', filename='korg.RData') }}"
#ls_pathvar = "{{ url_for('home/sumana/appyters/MetENP/static', filename='ls_path.RData') }}"
korgvar = "{{ url_for('var/www/html/appyters/MetENP/static', filename='korg.RData') }}"
ls_pathvar = "{{ url_for('var/www/html/appyters/MetENP/static', filename='ls_path.RData') }}"
robjects.r.assign('korgvar', korgvar)
robjects.r.assign('ls_pathvar', ls_pathvar)
robjects.r('''
load(korgvar)
load(ls_pathvar)
''')
# Fix: the scan above defines `sig_metabolites_kegg_id` (plural), so pass that
# object; the original passed the undefined R name `sig_metabolite_kegg_id`.
robjects.r('''
kegg_es = path_enrichmentscore(met_path,sig_metabolite_kegg_id=sig_metabolites_kegg_id,ls_path=ls_path,refmet_class=refmet_class,sps=studyOrg,padj=padjchar, kegg_comp_path=keggTFchar)
head(kegg_es)
''')
```
### Plot pathway network
Only plotting significant pathways (pathways with at least the minimum number of significant metabolites). There is no p-value or adjusted p-value threshold applied for the pathways.
The pathway network is such that it shows metabolites that are connected to different pathways and same metabolite in different pathway. Color of nodes of metabolites are according to the fold change of metabolites (low expression in green and high in red) and size of pathway nodes (square nodes) are according to the number of branches (meaning no of metabolites). All metabolite are written in blue.
```
image_png(robjects.r('''
plot_pathway_networks (met_path,kegg_es, TRUE)
'''))
```
### Heatmap
```
%%appyter hide_code
{% set x_size_heat = IntField(
name = 'x_size_heat',
label = 'Font Size for X',
description= "To change the size of the heatmap",
default=8,
section = 'HEAT'
) %}
%%appyter hide_code
{% set y_size_heat = IntField(
name = 'y_size_heat',
label = 'Font Size for Y',
description= "To change the size of the heatmap",
default=6,
section = 'HEAT'
) %}
%%appyter code_exec
robjects.r.assign('x_size_heat', {{x_size_heat}})
robjects.r.assign('y_size_heat', {{y_size_heat}})
image_png(robjects.r('''
plot_heatmap(met_path, shorten_name=TRUE,refmet_name=FALSE, xaxis=x_size_heat, yaxis=y_size_heat)
'''))
```
### Dotplot
```
%%appyter hide_code
{% set x_size_dot = IntField(
name = 'x_size_dot',
label = 'Font Size for X',
description= "To change the size of the dotplot",
default=8,
section = 'DOT'
) %}
%%appyter hide_code
{% set y_size_dot = IntField(
name = 'y_size_dot',
label = 'Font Size for Y',
description= "To change the size of the dotplot",
default=6,
section = 'DOT'
) %}
%%appyter code_exec
robjects.r.assign('x_size_dot', {{x_size_dot}})
robjects.r.assign('y_size_dot', {{y_size_dot}})
image_png(robjects.r('''
dotplot_met_class_path (met_path, kegg_es, metclassvar,xaxis=x_size_dot,yaxis=y_size_dot)
'''))
```
### Get the gene and enzyme info
Here we get the information of genes involved in enriched pathways for specified organism using KEGG database.
```
robjects.r('''
met_gene_info = enzyme_gene_info (metenrichment, studyOrg, metclassvar)
head(met_gene_info)
''')
```
#### Get the information if metabolite is a reactant or substrate
```
robjects.r('''
rclass_info = react_substrate(met_gene_info)
head(rclass_info)
''')
```
#### Get gene info in short form
```
robjects.r('''
met_gene_info2=data.table::data.table(rclass_info)[,lapply(.SD, function(x) toString(unique(x))), by = 'Metabolite']
''')
```
| github_jupyter |
<img src='../../../../img/EU-Copernicus-EUM_3Logos.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='50%'></img>
<br>
# 04 - Assignment - Solution
### About
> So far, we analysed Aerosol Optical Depth from different types of data (satellite, model-based and observations) for a single dust event. Let us now broaden our view and analyse the annual cycle in 2020 of Aerosol Optical Depth from AERONET and compare it with the CAMS global reanalysis data.
### Tasks
#### 1. Download and plot time-series of AERONET data for Santa Cruz, Tenerife in 2020
* **Hint**
* [AERONET - Example notebook](../../02_ground-based_observations/21_AERONET.ipynb)
* you can select daily aggregates of the station observations with setting `AVG=20`
* **Interpret the results:**
* Have there been other times in 2020 with increased AOD values?
* If yes, how could you find out if the increase in AOD is caused by dust? Try to find out by visualizing the AOD time-series together with another parameter from the AERONET data.
* [MSG SEVIRI Dust RGB](https://sds-was.aemet.es/forecast-products/dust-observations/msg-2013-eumetsat) and [MODIS RGB](https://worldview.earthdata.nasa.gov/) quick looks might be helpful to get a more complete picture of other events that might have happened in 2020.
#### 2. Download CAMS global reanalysis (EAC4) and select 2020 time-series for *Santa Cruz, Tenerife*
* **Hint**
* [CAMS global forecast - Example notebook](../../03_model-based_data/32_CAMS_global_forecast_duaod_load_browse.ipynb) (**Note:** the notebook works with CAMS forecast data, but they have a similar data structure to the CAMS global reanalysis data)
* [Data access](https://ads.atmosphere.copernicus.eu/cdsapp#!/dataset/cams-global-reanalysis-eac4?tab=form) with the following specifications:
> Variable on single levels: `Dust aerosol optical depth at 550 nm` <br>
> Date: `Start=2020-01-01`, `End=2020-12-31` <br>
> Time: `[00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00, 21:00]` <br>
> Restricted area: `N: 30., W: -20, E: 14, S: 20.` <br>
>Format: `netCDF` <br>
* With the xarray function `sel()` and keyword argument `method='nearest'` you can select data based on coordinate information
* We also recommend you to transform your xarray.DataArray into a pandas.DataFrame with the function `to_dataframe()`
#### 3. Visualize both time-series of CAMS reanalysis and AERONET daily aggregates in one plot
* **Interpret the results:** What can you say about the annual cycle in 2020 of AOD in Santa Cruz, Tenerife?
### Module outline
* [1 - Select latitude / longitude values for Santa Cruz, Tenerife](#select_lat_lon)
* [2 - Download and plot time-series of AERONET data](#aeronet)
* [3 - Download CAMS global reanalysis (EAC4) and select 2020 time-series for Santa Cruz, Tenerife](#cams_reanalysis)
* [4 - Combine both annual time-series and visualize both in one plot](#visualize_annual_ts)
<hr>
##### Load required libraries
```
%matplotlib inline
import os
import xarray as xr
import numpy as np
import netCDF4 as nc
import pandas as pd
from IPython.display import HTML
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.cm import get_cmap
from matplotlib import animation
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cartopy.feature as cfeature
from matplotlib.axes import Axes
from cartopy.mpl.geoaxes import GeoAxes
GeoAxes._pcolormesh_patched = Axes.pcolormesh
import wget
import warnings
warnings.simplefilter(action = "ignore", category = RuntimeWarning)
```
##### Load helper functions
```
%run ../functions.ipynb
```
<hr>
### <a id='select_lat_lon'></a>1. Select latitude / longitude values for Santa Cruz, Tenerife
You can see an overview of all available AERONET Site Names [here](https://aeronet.gsfc.nasa.gov/cgi-bin/draw_map_display_aod_v3?long1=-180&long2=180&lat1=-90&lat2=90&multiplier=2&what_map=4&nachal=1&formatter=0&level=3&place_code=10&place_limit=0).
```
lat = 28.473
lon = -16.247
```
<br>
### <a id='aeronet'></a>2. Download and plot time-series of AERONET data
As a first step, let us create a Python dictionary in which we store all the parameters we would like to use for the request as dictionary keys. You can initiate a dictionary with curled brackets `{}`. Below, we specify the following parameters:
* `endpoint`: Endpoint of the AERONET web service
* `station`: Name of the AERONET station
* `year`: year 1 of interest
* `month`: month 1 of interest
* `day`: day 1 of interest
* `year2`: year 2 of interest
* `month2`: month 2 of interest
* `day2`: day 2 of interest
* `AOD15`: data type, other options include `AOD10`, `AOD20`, etc.
* `AVG`: data format, `AVG=10` - all points, `AVG=20` - daily averages
The keywords below are those we will need for requesting daily averaged observations of Aerosol Optical Depth Level 1.5 data for the station Santa Cruz, Tenerife from 1 January to 31 December 2020.
```
data_dict = {
'endpoint': 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3',
'station':'Santa_Cruz_Tenerife',
'year': 2020,
'month': 1,
'day': 1,
'year2': 2020,
'month2': 12,
'day2': 31,
'AOD15': 1,
'AVG': 20
}
```
In a next step, we construct the final string for the wget request with the `format` function. You construct a string by adding the dictionary keys in curled brackets. At the end of the string, you provide the dictionary key information to the string with the `format()` function. A print of the resulting url shows that the format function replaced the information in the curled brackets with the data in the dictionary.
```
url = '{endpoint}?site={station}&year={year}&month={month}&day={day}&year2={year2}&month2={month2}&day2={day2}&AOD15={AOD15}&AVG={AVG}'.format(**data_dict)
url
```
<br>
Now we are ready to request the data with the function `download()` from the wget Python library. You have to pass to the function the constructed url above together with a file path where the downloaded data shall be stored. Let us store the data as a `txt` file in the folder `../../../../eodata/50_modules/01_dust/04_assignment/aeronet/`.
```
wget.download(url, '../../../../eodata/50_modules/01_dust/04_assignment/aeronet/2020_santa_cruz_tenerife_20.txt')
```
<br>
After we downloaded the station observations as a `txt` file, we can open it with the pandas function `read_table()`. We additionally set specific keyword arguments that allow us to specify the columns and rows of interest:
* `delimiter`: specify the delimiter in the text file, e.g. comma
* `header`: specify the index of the row that shall be set as header.
* `index_col`: specify the index of the column that shall be set as index
You see below that the resulting dataframe has 296 rows and 81 columns.
```
df = pd.read_table('../../../../eodata/50_modules/01_dust/04_assignment/aeronet/2020_santa_cruz_tenerife_20.txt', delimiter=',', header=[7], index_col=1)
df
```
Now, we can inspect the entries in the loaded data frame a bit more. Above you see that the last entry is a NaN entry, which is best to drop with the function `dropna()`.
The next step is then to replace the entries with -999.0 and set them as NaN. We can use the function `replace()` to do so.
```
df = df.dropna()
df = df.replace(-999.0, np.nan)
df
```
Let us now convert the index entry to a `DateTimeIndex` format with the function `to_datetime()`. Important here, you have to specify the format of the index string: `%d:%m:%Y`.
You see below that we do not have observations for every day. E.g on 2 January 2020, the data frame does not list any entry.
```
df.index = pd.to_datetime(df.index, format = '%d:%m:%Y')
df
```
<br>
We can now plot the column `AOD_500nm` as annual time-series. You see that the station `Santa Cruz, Tenerife` was affected by other dust events later in 2020.
```
# Initiate a figure
fig = plt.figure(figsize=(20,8))
ax = plt.subplot()
# Define the plotting function
ax.plot(df.AOD_500nm, 'o-', color='green', label='AERONET observations')
# Customize the title and axes lables
ax.set_title('\nAerosol Optical Depth at 500 nm - Santa Cruz Tenerife\n', fontsize=20)
ax.set_ylabel('~', fontsize=14)
ax.set_xlabel('\nDay', fontsize=14)
# Customize the fontsize of the axes tickes
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# Add a gridline to the plot
ax.grid(linestyle='--')
plt.legend(fontsize=16, loc=2)
```
<br>
The next question is now, how you can find out, if the strong increase of AOD at the end of August 2020 was because of dust? For this to find out, you can use the `Angstrom Exponent`, which gives us an indication of the particle size. If the `Angstrom Exponent` is below 0.6, then it is an indication that the increase of AOD is caused by coarser dust particles.
Let us visualize the AOD at 500nm for 2020 together with the `Angstrom Exponent 440-675nm`.
```
# Initiate a figure
fig = plt.figure(figsize=(20,8))
ax = plt.subplot()
# Define the plotting function
ax.plot(df.AOD_500nm, 'o-', color='green', label='AERONET observations')
ax.plot(df['440-675_Angstrom_Exponent'], '-', color='lightgrey', label='Angstrom Exponent - 440-675nm')
plt.axhline(y=0.6, color='r', linestyle='dotted', label='Angstrom Exponent <0.6 is dust')
# Customize the title and axes lables
ax.set_title('\nAerosol Optical Depth at 500 nm - Santa Cruz Tenerife\n', fontsize=20)
ax.set_ylabel('~', fontsize=14)
ax.set_xlabel('\nDay', fontsize=14)
# Customize the fontsize of the axes tickes
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# Add a gridline to the plot
ax.grid(linestyle='--')
plt.legend(fontsize=16, loc=2)
```
Above, you see that the Angstrom Exponent during the high AOD values at the end of August is very low. Hence, we could interpret this event as a strong dust intrusion. But is this really the case? You can also check [here](https://sds-was.aemet.es/forecast-products/dust-observations/msg-2013-eumetsat) the MSG SEVIRI Dust RGB for e.g. 26 August 2020 and [here](https://worldview.earthdata.nasa.gov/?v=-39.451155087380556,13.025874527486357,5.067364489712844,36.350274677008436&l=Reference_Labels_15m(hidden),Reference_Features_15m(hidden),Coastlines_15m,MODIS_Aqua_CorrectedReflectance_TrueColor(hidden),MODIS_Terra_CorrectedReflectance_TrueColor&lg=false&t=2020-08-26-T00%3A00%3A00Z) the MODIS RGB to better understand the event and what could have caused the high AOD values.
<br>
### <a id='cams_reanalysis'></a> 3. Download CAMS global reanalysis (EAC4) and select 2020 time-series for Santa Cruz, Tenerife
First, we have to download the `CAMS global reanalysis (EAC4)` from the [Copernicus Atmosphere Data Store](https://ads.atmosphere.copernicus.eu/cdsapp#!/dataset/cams-global-reanalysis-eac4?tab=form) with the following specifications:
* Variable on single levels: `Dust aerosol optical depth at 550 nm`
* Date: `Start=2020-01-01`, `End=2020-12-31`
* Time: `[00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00, 21:00]`
* Restricted area: `N: 30., W: -20, E: 14, S: 20.`
* Format: `netCDF`
See `CDSAPI` request below.
```
URL = 'https://ads.atmosphere.copernicus.eu/api/v2'
KEY = '######################'  # replace with your personal ADS API key
import cdsapi
c = cdsapi.Client(url=URL, key=KEY)
# Download CAMS global reanalysis (EAC4) dust AOD at 550nm for 2020.
# Fix: the call was terminated with '}' instead of ')', a SyntaxError.
c.retrieve(
    'cams-global-reanalysis-eac4',
    {
        'variable': 'dust_aerosol_optical_depth_550nm',
        'date': '2020-01-01/2020-12-31',
        'time': [
            '00:00', '03:00', '06:00',
            '09:00', '12:00', '15:00',
            '18:00', '21:00',
        ],
        # area order is N, W, S, E
        'area': [
            30, -20, 20,
            15,
        ],
        'format': 'netcdf',
    },
    '../../../../eodata/50_modules/01_dust/04_assignment/cams/2020_dustAOD_cams_eac4.nc')
```
<br>
The data is in netCDF, so we can open the netCDF file with the xarray function `open_dataset()`. We see that the data has three dimensions (`latitude`, `longitude`, `time`) and one data variable:
* `duaod550`: Dust Aerosol Optical Depth at 550nm
```
file = xr.open_dataset('../../../../eodata/50_modules/01_dust/04_assignment/cams/2020_dustAOD_cams_eac4.nc')
file
```
<br>
Let us now store the data variable `Dust Aerosol Optical Depth (AOD) at 550nm` as `xarray.DataArray` with the name `duaod_cams`.
```
duaod_cams = file['duaod550']
duaod_cams
```
<br>
Now, we can select the time-series of the grid point nearest to the station in Santa Cruz, Tenerife. We can use the function `sel()` to select data based on the longitude and latitude dimensions. The keyword argument `method='nearest'` selects the grid point entry closest to the station coordinates.
```
cams_ts = duaod_cams.sel(longitude=lon, latitude=lat, method='nearest')
cams_ts
```
<br>
The next step is now to resample the 3-hourly time entries and aggregate it to daily averages. We can use a combination of the functions `resample()` and `mean()` to create daily averages.
```
cams_ts_resample = cams_ts.resample(time='1D').mean()
cams_ts_resample
```
<br>
A closer look at the `time` dimension shows us that we now have an entry for each day in 2020.
```
cams_ts_resample.time
```
<br>
Now, we can convert the `xarray.DataArray` to a `pandas.DataFrame`, as pandas is more efficient to handle time-series data. The function `to_dataframe()` easily converts a data array to a dataframe. The resulting dataframe has 366 rows and 3 columns.
```
cams_ts_df = cams_ts_resample.to_dataframe()
cams_ts_df
```
<br>
### <a id='visualize_annual_ts'></a>4. Combine both annual time-series and visualize both in one plot
Let us now use the function `join()` and combine the two time-series `cams_ts_df` and `df['AOD_500nm]`. The resulting dataframe has 366 rows and 4 columns.
```
df_combined = cams_ts_df.join(df['AOD_500nm'])
df_combined
```
Let us save the pandas dataframe as a csv file. This allows us to easily load the time-series again at a later stage. You can use the function `to_csv()` to save a pandas.DataFrame as csv.
```
df_combined.to_csv("../../../../eodata/50_modules/01_dust/04_assignment/2020_ts_cams_aeronet.csv", index_label='time')
```
<br>
The last step is now to plot the two columns of the pandas.DataFrame `df_combined` as two individual line plots.
```
# Initiate a figure
fig = plt.figure(figsize=(20,8))
ax = plt.subplot()
# Define the plotting function
ax.plot(df_combined.duaod550, '-', color='blue', label='CAMS global reanalysis (EAC4) - 550 nm')
ax.plot(df_combined.AOD_500nm, '-', color='green', label='AERONET observations - 500 nm')
plt.axhline(y=0.6, color='r', linestyle='dotted', label='PM10 daily limit')
# Customize the title and axes lables
ax.set_title('\nAerosol Optical Depth at 500 / 550 nm - Santa Cruz Tenerife\n', fontsize=20)
ax.set_ylabel(cams_ts.units, fontsize=14)
ax.set_xlabel('\nDay', fontsize=14)
# Customize the fontsize of the axes tickes
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# Add a gridline to the plot
ax.grid(linestyle='--')
plt.legend(fontsize=14, loc=2)
```
<br>
You see in the plot above that the model and the AERONET observations follow a similar annual cycle of AOD in 2020 for the Santa Cruz station in Tenerife. You also see that for higher AOD values measured by AERONET, the CAMS model mostly underpredicts the AOD intensity.
<hr>
<img src='../../img/copernicus_logo.png' alt='Logo EU Copernicus' align='right' width='20%'><br><br><br><br>
<p style="text-align:right;">This project is licensed under the <a href="./LICENSE">MIT License</a> and is developed under a Copernicus contract.
| github_jupyter |
```
import zarr
from pyprojroot import here
import pandas as pd
import numpy as np
import allel
import yaml
import matplotlib.pyplot as plt
import functools
import seaborn as sns
sns.set_context('paper')
sns.set_style('darkgrid')
import dask.array as da
import scipy.interpolate
import scipy.stats
import petl as etl
import pyfasta
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
# OLD VECTORBASE - gone
# genome_path = here() / 'data/external/vectorbase/Anopheles-gambiae-PEST_CHROMOSOMES_AgamP4.fa'
# genome = pyfasta.Fasta(str(genome_path), key_fn=lambda x: x.split()[0])
# NEW VECTORBASE
def _genome_key_fn(s):
k = s.split()[0]
if k.startswith('AgamP4'):
k = k.split('_')[1]
return k
genome_path = here() / 'data/external/vectorbase/VectorBase-48_AgambiaePEST_Genome.fasta'
genome = pyfasta.Fasta(str(genome_path), key_fn=_genome_key_fn)
chromosomes = '2', '3', 'X'
chromosome_plen = {
'2': len(genome['2R']) + len(genome['2L']),
'3': len(genome['3R']) + len(genome['3L']),
'X': len(genome['X'])
}
pop_defs_path = here() / 'notebooks/gwss/pop_defs.yml'
with open(pop_defs_path, 'rt') as f:
pop_defs = yaml.safe_load(f)
pops = list(pop_defs)
h12_root_path = here() / 'data/gwss/h12/h12.zarr'
h12_root = zarr.open_consolidated(str(h12_root_path))
def load_h12_gwss(pop, chromosome):
    """Load the precomputed H12 scan for one population and chromosome.

    Window size is population-specific (taken from ``pop_defs``); the step
    is fixed at 200 SNPs. Returns a tuple of numpy arrays:
    (windows, gwindows, h1, h12, h123, h2_h1).
    """
    size = pop_defs[pop]['h12_window_size']
    step = 200
    grp = h12_root[f'{pop}/{size}/{step}/{chromosome}']
    keys = ('windows', 'gwindows', 'h1', 'h12', 'h123', 'h2_h1')
    return tuple(grp[key][:] for key in keys)
ihs_root_path = here() / 'data/gwss/ihs/ihs.zarr'
ihs_root = zarr.open_consolidated(str(ihs_root_path))
@functools.lru_cache(maxsize=None)
def load_ihs_gwss(pop, chromosome, window_size=200, window_step=100):
    """Load the standardised iHS scan and summarise |iHS| in moving windows.

    Returns (x, gx, y_max, y_pc95, y_pc75, y_pc50): mean physical and
    genetic window positions plus the max, 95th, 75th percentile and
    median of |iHS| per window.
    """
    grp = ihs_root[f'{pop}/{chromosome}']
    pos = grp['pos'][:]
    gpos = grp['gpos'][:]
    abs_ihs = np.fabs(grp['ihs_std'][:])

    def windowed(values, stat):
        # One moving-window summary, shared size/step for all statistics.
        return allel.moving_statistic(values, stat, size=window_size, step=window_step)

    x = windowed(pos, np.mean)
    gx = windowed(gpos, np.mean)
    y_max = windowed(abs_ihs, np.max)
    y_pc95 = windowed(abs_ihs, lambda v: np.percentile(v, 95))
    y_pc75 = windowed(abs_ihs, lambda v: np.percentile(v, 75))
    y_pc50 = windowed(abs_ihs, np.median)
    return x, gx, y_max, y_pc95, y_pc75, y_pc50
xpehh_root_path = here() / 'data/gwss/xpehh/xpehh.zarr'
xpehh_root = zarr.open_consolidated(str(xpehh_root_path))
@functools.lru_cache(maxsize=None)
def load_xpehh_gwss(pop1, pop2, chromosome, window_size=500, window_step=250):
    """Load the XP-EHH scan for a population pair and summarise in windows.

    Scans are stored once per pair under the alphabetically sorted key, so
    the sign is flipped when the requested orientation is reversed. Scores
    are median-centred, then clipped at zero to focus on selection in pop1.
    Returns (x, gx, y_max, y_pc95, y_pc75, y_pc50).
    """
    lo, hi = sorted([pop1, pop2])
    grp = xpehh_root[f'{lo}_{hi}/{chromosome}']
    pos = grp['pos'][:]
    gpos = grp['gpos'][:]
    xpehh = grp['xpehh'][:]
    if lo == pop2:
        # Stored orientation is reversed relative to the requested one.
        xpehh = -xpehh
    # Centre, then keep only the pop1-positive tail.
    xpehh = xpehh - np.median(xpehh)
    positive = np.clip(xpehh, a_min=0, a_max=None)

    def windowed(values, stat):
        return allel.moving_statistic(values, stat, size=window_size, step=window_step)

    x = windowed(pos, np.mean)
    gx = windowed(gpos, np.mean)
    y_max = windowed(positive, np.max)
    y_pc95 = windowed(positive, lambda v: np.percentile(v, 95))
    y_pc75 = windowed(positive, lambda v: np.percentile(v, 75))
    y_pc50 = windowed(positive, np.median)
    return x, gx, y_max, y_pc95, y_pc75, y_pc50
pbs_root_path = here() / 'data/gwss/pbs/pbs.zarr'
pbs_root = zarr.open_consolidated(str(pbs_root_path))
def load_pbs_gwss(pop1, pop2, pop3, chromosome, window_size=500, window_step=250):
    """Load the precomputed PBS scan for a population trio and chromosome.

    Returns a tuple of numpy arrays: (windows, gwindows, pbs, pbs_scaled).
    """
    grp = pbs_root[f'/{pop1}_{pop2}_{pop3}/{window_size}/{window_step}/{chromosome}']
    keys = ('windows', 'gwindows', 'pbs', 'pbs_scaled')
    return tuple(grp[key][:] for key in keys)
def load_genes():
    """Load the VectorBase gene annotations, adding whole-chromosome coordinates.

    Reads the AgamP4 GFF (new VectorBase release 48; the old VectorBase GFF is
    no longer available), keeps 'gene' features indexed by ID, and collapses
    the arm coordinates (2R/2L, 3R/3L) onto chromosome-scale coordinates by
    offsetting left arms by the corresponding right-arm length.
    """
    features_path = here() / 'data/external/vectorbase/VectorBase-48_AgambiaePEST.gff'
    df_genes = (
        allel.gff3_to_dataframe(
            str(features_path),
            attributes=['ID', 'description']
        )
        .sort_values(['seqid', 'start'])
        .set_index('ID')
        .query("type == 'gene'")
    )
    # Fix chromosome IDs: strip the 'AgamP4_' prefix from contig names.
    df_genes['seqid'] = df_genes['seqid'].str.split('_', expand=True).loc[:, 1]
    # Start from per-arm coordinates, then shift left arms into place.
    df_genes['chromosome'] = df_genes['seqid'].copy()
    df_genes['chromosome_start'] = df_genes['start'].copy()
    df_genes['chromosome_end'] = df_genes['end'].copy()
    arm_layout = [
        ('2R', '2', 0),
        ('2L', '2', len(genome['2R'])),
        ('3R', '3', 0),
        ('3L', '3', len(genome['3R'])),
    ]
    for arm, chrom, offset in arm_layout:
        loc_arm = df_genes.seqid == arm
        df_genes.loc[loc_arm, 'chromosome'] = chrom
        if offset:
            df_genes.loc[loc_arm, 'chromosome_start'] = df_genes.loc[loc_arm, 'start'] + offset
            df_genes.loc[loc_arm, 'chromosome_end'] = df_genes.loc[loc_arm, 'end'] + offset
    # X needs no remapping: arm and chromosome coincide.
    df_genes['chromosome_center'] = (df_genes['chromosome_start'] + df_genes['chromosome_end']) / 2
    return df_genes
df_genes = load_genes()
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ace1 = df_genes.loc['AGAP001356']
ace1['Name'] = 'Ace1'
cyp6p3 = df_genes.loc['AGAP002865']
cyp6p3['Name'] = 'Cyp6p3'
vgsc = df_genes.loc['AGAP004707']
vgsc['Name'] = 'Vgsc'
gaba = df_genes.loc['AGAP006028']
gaba['Name'] = 'Gaba'
gste2 = df_genes.loc['AGAP009194']
gste2['Name'] = 'Gste2'
cyp9k1 = df_genes.loc['AGAP000818']
cyp9k1['Name'] = 'Cyp9k1'
ir_genes = [ace1, cyp6p3, vgsc, gaba, gste2, cyp9k1]
novel_loci = {
'A': ('2', 24_860_000),
'B': ('2', 40_940_000),
'C': ('2', 28_549_590 + len(genome['2R'])),
'D': ('2', 34_050_000 + len(genome['2R'])),
'E': ('X', 4_360_000),
'F': ('X', 9_220_000),
}
tbl_chromatin = [
('name', 'chrom', 'start', 'end'),
('CHX', 'X', 20009764, 24393108),
('CH2R', '2R', 58984778, 61545105),
('CH2L', '2L', 1, 2431617),
('PEU2L', '2L', 2487770, 5042389),
('IH2L', '2L', 5078962, 5788875),
('IH3R', '3R', 38988757, 41860198),
('CH3R', '3R', 52161877, 53200684),
('CH3L', '3L', 1, 1815119),
('PEU3L', '3L', 1896830, 4235209),
('IH3L', '3L', 4264713, 5031692)
]
seq_ids = '2R', '2L', '3R', '3L', 'X'
def build_gmap():
    """Build a crude genetic map (cumulative cM position per base) for each
    sequence in `seq_ids`, plus concatenated maps for chromosomes '2' and '3'.

    Uses a flat 2 cM/Mbp recombination rate everywhere except the chromatin
    regions in `tbl_chromatin`, where heterochromatin (names containing 'H')
    gets 0.5 cM/Mbp. Depends on module-level `etl` (petl), `np`, `genome`,
    `tbl_chromatin` and `seq_ids`.
    """
    # crude recombination rate lookup, keyed off chromatin state
    # use units of cM / bp, assume 2 cM / Mbp == 2x10^-6 cM / bp
    tbl_rr = (
        etl.wrap(tbl_chromatin)
        # extend heterochromatin on 2L - this is empirical, based on making vgsc peaks symmetrical
        .update('end', 2840000, where=lambda r: r.name == 'CH2L')
        .update('start', 2840001, where=lambda r: r.name == 'PEU2L')
        .addfield('rr', lambda r: .5e-6 if 'H' in r.name else 2e-6)
    )
    # per-base map of recombination rates, default 2e-6 cM/bp everywhere
    rr_map = {seq_id: np.full(len(genome[seq_id]), fill_value=2e-6, dtype='f8')
              for seq_id in seq_ids}
    # overwrite the chromatin regions (table is 1-based inclusive, hence -1)
    for row in tbl_rr.records():
        rr_map[row.chrom][row.start - 1:row.end] = row.rr
    # genetic map: cumulative sum of per-base rates -> cM position per base
    gmap = {seq_id: np.cumsum(rr_map[seq_id]) for seq_id in seq_ids}
    # whole-chromosome maps: append the L arm after the R arm, offsetting by
    # the R arm's final cM value so the map stays monotonic
    gmap['2'] = np.concatenate([gmap['2R'], gmap['2L'] + gmap['2R'][-1]])
    gmap['3'] = np.concatenate([gmap['3R'], gmap['3L'] + gmap['3R'][-1]])
    return gmap
gmap = build_gmap()
def tex_italicize_species(s):
    """Return `s` with mosquito species names wrapped in LaTeX ``\\textit{...}``.

    Bug fix: the original used plain string literals ('\textit{...}'), in
    which Python interprets the leading ``\t`` as a TAB character, so the
    replacement inserted ``<TAB>extit{...}`` instead of the LaTeX command.
    Raw strings preserve the backslash verbatim.
    """
    return (
        s
        .replace('An. gambiae', r'\textit{An. gambiae}')
        .replace('An. coluzzii', r'\textit{An. coluzzii}')
    )
def root_mean_square(s):
    """Quadratic mean (RMS) of the values in `s`."""
    mean_of_squares = np.mean(np.square(s))
    return np.sqrt(mean_of_squares)
def mean_absolute(s):
    """Mean of the absolute values of `s`."""
    magnitudes = np.fabs(s)
    return np.mean(magnitudes)
```
| github_jupyter |
# 2. Modelling SVR Linear
---
```
## load modules and run mlflow_logging.ipynb to get function to track model information on MLFLow
import sys
sys.path.append("..")
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.svm import LinearSVR
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import GridSearchCV
from modeling.functions import modelling, log_to_mlflow, get_features, save_models, save_results
# Load the GEFCom2014 wind data (with pre-engineered features); drop rows
# with missing values before modelling.
data = pd.read_csv('../data/GEFCom2014Data/Wind/raw_data_incl_features.csv', parse_dates=['TIMESTAMP'])
data.head()
data.dropna(inplace=True)
data.info()
# Fixed random seed for reproducibility.
RSEED = 42
# One-hot encode the cardinal wind-direction columns.
data = pd.get_dummies(data, columns = ['WD100CARD','WD10CARD'])
data.head()
## train-test-split
# Chronological split (last ~6 months held out) rather than the random,
# zone-stratified split kept commented out below.
#data_train, data_test = train_test_split(data, test_size=0.25, random_state=RSEED, stratify=data.ZONEID)
data_train = data[data.TIMESTAMP <= '2013-07-01 00:00:00']
data_test = data[data.TIMESTAMP > '2013-07-01 00:00:00']
# define features and feature dict
feature_dict = get_features(data)
features = feature_dict['all']
# define zone
zone = 1
# split train and test data in feature and TARGETVAR parts and cut data to desired zones
X_train = data_train[data_train.ZONEID == zone][features]
y_train = data_train[data_train.ZONEID == zone].TARGETVAR
X_test = data_test[data_test.ZONEID == zone][features]
y_test = data_test[data_test.ZONEID == zone].TARGETVAR
# Scale data — scaler fitted on the training split only, to avoid leakage.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Single-zone exploratory grid search (disabled).
model = LinearSVR()
param_grid = {'C': [0.01, 0.1, 1, 10, 100]}
# cv = GridSearchCV(model, param_grid=param_grid)
# cv.fit(X_train, y_train)
#y_pred = cv.predict(X_test)
#print(mean_squared_error(y_test, y_pred, squared=False))
# Full run: one grid-searched LinearSVR per feature set, via the project's
# modelling() helper (presumably fits per zone internally — see
# modeling.functions; results keyed by feature-set name).
model = LinearSVR(max_iter=100000)
scaler = MinMaxScaler()
model_dict = {}
results_train = {}
results_test = {}
param_grid = {'C': [0.1, 1, 10]}
for key in feature_dict.keys():
    print(f'Features: {key}')
    results_train[key],results_test[key], model_dict[key] = modelling(data_train, data_test, feature_dict[key], model = model, scaler=scaler, print_scores=True, log=None, infotext_mlflow=None, save_model= True, perform_gridCV= True, param_grid= param_grid, n_jobs = 3)
    # Round test scores for readable display.
    results_test[key] = {k : np.round(value,5) for k,value in results_test[key].items()}
results_test
#save_models(model_dict)
save_results(results_train, results_test, '../saved_models/211203_1152_LinearSVR')
def save_scores(results_dict):
    """Placeholder — not implemented.

    NOTE(review): the cell below builds and writes the scores table inline;
    that logic is presumably meant to move into this function. TODO confirm.
    """
    pass
# Build a tidy (feature-set, zone, train score, test score) table from the
# modelling results and write it next to the saved models.
#
# BUG FIX: `path` and `file_name` were originally defined *after* their
# first use in df.to_csv(), which raises NameError on a clean
# top-to-bottom run; they are now defined up front.
path = '../saved_models/211201_0915_GridSearchCV'
file_name = path.split('/')[-1] + '.csv'

features = []
zones = []
train_score = []
test_score = []
for key in results_train.keys():
    for zone in results_train[key].keys():
        features.append(key)
        zones.append(zone)
        train_score.append(results_train[key][zone])
        test_score.append(results_test[key][zone])
df = pd.DataFrame({'features':features,'zone': zones,'train_score': train_score,'test_score': test_score})
df.to_csv(path + '/' + file_name, index=False)

# Notebook display expressions (kept from the original cells).
df
df.zone.unique()
file_name
```
| github_jupyter |
<h1> <b>Homework 1</b></h1>
<i>Alejandro J. Rojas<br>
ale@ischool.berkeley.edu<br>
W261: Machine Learning at Scale<br>
Week: 01<br>
Jan 21, 2016</i>
<h2>HW1.0.0.</h2> Define big data. Provide an example of a big data problem in your domain of expertise.
The term big data is associated with datasets that cannot be processed, stored and transformed using traditional applications and tools because of high volume, high velocity and high variety. By high volume, we mean datasets that not only require high storage capacity, usually beyond 1T, but also datasets that are too big to have a decent processing and throughput time. By high velocity, we mean data that requires real-time processing with throughput speeds that can be bursty. High variety includes data that comes from different formats, some structured, some not, that all need to be ingested and transformed to be processed. Big data is simply changing the way we used to collect and analyze data, while opening opportunities to increase the scale, scope and intimacy of the analyses that we are now able to do.
The social web is a leading source of big data applications given our ability to log almost anything that the user does when interacting with an application. In my field, I've seen how online videos are increasingly the way users consume media. A video, per se, is an unstructured data item and its interactions are usually captured by leading social media platforms like Facebook, Twitter and Youtube in the form of JSON, a semi-structured format that can capture user interactions such as likes, shares and comments. Across the internet, the amount of videos being uploaded and streamed is exploding, making it a challenge to measure, in real time, the media-consumption habits of our target users. Big data can help in providing insights from all of this information so that we can better predict the taste of users visiting our site properties to serve them content they like.
<h2>HW1.0.1.</h2>In 500 words (English or pseudo code or a combination) describe how to estimate the bias, the variance, and the irreducible error for a test dataset T when polynomial regression models of degrees 1, 2, 3, 4, 5 are considered. How would you select a model?
For any dataset T that contains n independent variables (x1, x2, ..xn) and one dependent variable y_true, we can observe the following:
If we try to estimate y as a function of x:
y_pred = f(x)
The estimate of our function will produce an error shown as:
<img src="error.png">
This error varies as we increase the complexity of our models as the following chart shows:
<img src="Prediction-Error.png">
The source of this error can be divided into three types:
bias
variance
irreducible error
and can be derived mathematically the following way
<img src="mathematicalerrors.jpg">
Bias error is introduced by us when we try to simplify the dynamics that we observe in the data, for instance by using a linear function to estimate y.
As we try to better fit the underlying data, we can try implementing nonlinear functions.
As the order of the polynomial regression increases, our function f(x) will more closely match the underlying portion of the dataset T and consequently we reduced our bias error.
However, if we randomly applied our high-ordered polynomial f(x) to another portion of dataset T, we will find that our error will increase because we introduced variance error by overfitting the prior dataset.
So as a rule of thumb, we can say that
as the degree of the predictive polynomial function f(x) increases:
bias error is reduced
variance error is increased
the trick is to find the optimal point where the sum of these two errors is at the minimum. Even at that point, our function f(x) will still show some error that will be irreducible because it comes from imprecisions in the way data was collected or other types of noise present in the dataset T.
In this chart you can see how each of these errors varies as we bootstrap 50 samples of dataset T:
<img src="bootstrapping.jpg">
<h2> HW1.1.</h2> Read through the provided control script (pNaiveBayes.sh)
and all of its comments. When you are comfortable with their
purpose and function, respond to the remaining homework questions below.
A simple cell in the notebook with a print statement with a "done" string will suffice here. (don't forget to include the Question Number and the question in the cell as a multiline comment!)
# <----------------------------------End of HW1.1------------------------------------->
<h2>HW1.2.</h2>Provide a mapper/reducer pair that, when executed by pNaiveBayes.sh
will determine the number of occurrences of a single, user-specified word. Examine the word “assistance” and report your results.
# Map
```
%%writefile mapper.py
#!/usr/bin/python
## mapper.py
## Author: Alejandro J. Rojas
## Description: mapper code for HW1.2-1.5
## Counts, for one chunk of the email data, how many words contain any of
## the user-specified search terms, and prints the count to STDOUT for the
## reducer to sum. (Python 2 syntax: `print` statement.)
import sys
import re

count = 0    # occurrences of any search term
records = 0  # tab-delimited email records seen
words = 0    # total whitespace-delimited words seen

## collect user input
filename = sys.argv[1]                         # path to this mapper's data chunk
findwords = re.split(" ",sys.argv[2].lower())  # search terms, lower-cased

with open (filename, "r") as myfile:
    for line in myfile.readlines():
        record = re.split(r'\t+', line)
        records = records + 1
        for i in range (len(record)):
            bagofwords = re.split(" ",record[i]) ### Break each email records into words
            for word in bagofwords:
                words = words + 1
                for keyword in findwords:
                    ## NOTE: substring match — 'assistance' also matches
                    ## 'assistances'; presumably intended for this homework.
                    if keyword in word:
                        count = count + 1 ### Add one the count of found words

##print '# of Records analized',records
##print '# of Words analized', words
##print '# of Ocurrences', count
print count
!chmod +x mapper.py
```
# Reduce
```
%%writefile reducer.py
#!/usr/bin/python
## reducer.py
## Author: Alejandro J. Rojas
## Description: reducer code for HW1.2
## Sums the single-integer counts emitted by each mapper chunk and prints
## the grand total. (Python 2 syntax.)
import sys
import re

sum = 0  # running total; NOTE: shadows the builtin `sum`

## collect user input
filenames = sys.argv[1:]  # the per-chunk count files
for file in filenames:
    with open (file, "r") as myfile:
        for line in myfile.readlines():
            if line.strip():
                sum = sum + int(line) ### Add counts present on all mapper produced files
print sum
!chmod +x reducer.py
```
# Write script to file
```
%%writefile pNaiveBayes.sh
## pNaiveBayes.sh
## Author: Jake Ryland Williams
## Usage: pNaiveBayes.sh m wordlist
## Input:
## m = number of processes (maps), e.g., 4
## wordlist = a space-separated list of words in quotes, e.g., "the and of"
##
## Poor-man's map-reduce driver: splits the data file into m chunks, runs
## mapper.py on each chunk in parallel, then reduces the chunk outputs.
##
## Instructions: Read this script and its comments closely.
## Do your best to understand the purpose of each command,
## and focus on how arguments are supplied to mapper.py/reducer.py,
## as this will determine how the python scripts take input.
## When you are comfortable with the unix code below,
## answer the questions on the LMS for HW1 about the starter code.
## collect user input
m=$1 ## the number of parallel processes (maps) to run
wordlist=$2 ## if set to "*", then all words are used
## a test set data of 100 messages
data="enronemail_1h.txt"
## the full set of data (33746 messages)
# data="enronemail.txt"
## 'wc' determines the number of lines in the data
## 'perl -pe' regex strips the piped wc output to a number
linesindata=`wc -l $data | perl -pe 's/^.*?(\d+).*?$/$1/'`
## determine the lines per chunk for the desired number of processes
linesinchunk=`echo "$linesindata/$m+1" | bc`
## split the original file into chunks by line
split -l $linesinchunk $data $data.chunk.
## assign python mappers (mapper.py) to the chunks of data
## and emit their output to temporary files
for datachunk in $data.chunk.*; do
## feed word list to the python mapper here and redirect STDOUT to a temporary file on disk
####
#### (student-completed) run the mapper on this chunk in the background
./mapper.py $datachunk "$wordlist" > $datachunk.counts &
####
####
done
## wait for the mappers to finish their work
wait
## 'ls' makes a list of the temporary count files
## 'perl -pe' regex replaces line breaks with spaces
countfiles=`\ls $data.chunk.*.counts | perl -pe 's/\n/ /'`
## feed the list of countfiles to the python reducer and redirect STDOUT to disk
####
#### (student-completed) merge all chunk counts into the final output file
./reducer.py $countfiles > $data.output
####
####
numOfInstances=$(cat $data.output)
echo "found [$numOfInstances] [$wordlist]" ## Report how many were found
## clean up the data chunks and temporary count files
\rm $data.chunk.*
!chmod a+x pNaiveBayes.sh
```
# Run file
Usage: usage: pNaiveBayes.sh m wordlist
```
!./pNaiveBayes.sh 5 "assistance"
```
# <----------------------------------End of HW1.2------------------------------------->
<h2>HW1.3.</h2> Provide a mapper/reducer pair that, when executed by pNaiveBayes.sh
will classify the email messages by a single, user-specified word using the multinomial Naive Bayes Formulation. Examine the word “assistance” and report your results.
# Map
```
%%writefile mapper.py
#!/usr/bin/python
## mapper.py
## Author: Alejandro J. Rojas
## Description: mapper code for HW1.3
## Emits one line per complete email record carrying fixed-position counts
## that reducer.py sums into Naive Bayes statistics. (Python 2 syntax.)
import sys
import re

########## Collect user input ###############
filename = sys.argv[1]                         # path to this mapper's data chunk
findwords = re.split(" ",sys.argv[2].lower())  # search terms, lower-cased

with open (filename, "r") as myfile:
    for line in myfile.readlines():
        record = re.split(r'\t+', line) ### Each email is a record with 4 components
                                        ### 1) ID 2) Spam Truth 3) Subject 4) Content
        if len(record)==4: ### Take only complete records
            ########## Variables to collect and measure #########
            ## NOTE: counters are deliberately re-initialised per record —
            ## each printed line carries only this email's counts and the
            ## reducer accumulates them across lines.
            records = 0 ### Each record corresponds to a unique email
            words = 0 ### Words written in all emails including Subject
            spam_records, spam_words, spam_count = 0,0,0 ### Spam email count, words in spam email, user-specified word count
            ham_records, ham_words, ham_count = 0, 0, 0 ### Same as above but for not spam emails
            records += 1 ### add one to the total sum of emails
            if int(record[1]) == 1: ### If the email is labeled as spam
                spam_records += 1 ### add one to the email spam count
                for i in range (2,len(record)): ### Starting from Subject to the Content
                    bagofwords = re.split(" ",record[i]) ### Collect all words present on each email
                    for word in bagofwords: ### For each word
                        words += 1 ### add one to the total sum of words
                        spam_words += 1 ### add one to the total sum of spam words
                        for keyword in findwords: ### for each word specified by user
                            if keyword in word: ### If there's a match then
                                spam_count += 1 ### add one to the user specified word count as spam
            else: ### If email is not labeled as spam
                ham_records +=1 ### add one to the email ham count
                for i in range (2,len(record)): ### Starting from Subject to the Content
                    bagofwords = re.split(" ",record[i]) ### Collect all words present on each email
                    for word in bagofwords: ### For each word
                        words += 1 ### add one to the total sum of words
                        ham_words += 1 ### add one to the total sum of ham words
                        for keyword in findwords: ### for each word specified by user
                            if keyword in word: ### If there's a match then
                                ham_count += 1 ### add one to the user specified word count as ham
            record_id = record[0]
            truth = record[1]
            ## Fixed-order output. Python 2 `print` with the extra " " items
            ## produces triple-spaced fields, which is why the reducer reads
            ## values at stride-3 indices (0, 3, 6, ...).
            print spam_count, " ", spam_words, " ", spam_records, " ", \
                ham_count, " ", ham_words, " ", ham_records, " ", \
                words, " ", records, " ", record_id, " ", truth
!chmod +x mapper.py
```
# Reduce
```
%%writefile reducer.py
#!/usr/bin/python
## reducer.py
## Author: Alejandro J. Rojas
## Description: reducer code for HW1.3-1.4
## Pass 1: sum the mapper's per-record counts into corpus totals and derive
## Naive Bayes priors and word-given-class probabilities.
## Pass 2: re-read the mapper output and classify each email, printing
## cumulative accuracy. (Python 2 syntax.)
import sys
import re

sum_spam_records, sum_spam_words, sum_spam_count = 0,0,0
sum_ham_records, sum_ham_words, sum_ham_count = 0,0,0
sum_records,sum_words = 0,0

## collect user input
filenames = sys.argv[1:]
for file in filenames:
    with open (file, "r") as myfile:
        for line in myfile.readlines():
            if line.strip():
                ## Mapper fields are separated by three spaces, so after
                ## splitting on a single space the real values sit at
                ## indices 0, 3, 6, ... (in-between entries are empty).
                factors = re.split(" ", line)
                sum_spam_count += int(factors[0]) ## sum up every time the word was found in a spam
                sum_spam_words += int(factors[3]) ## sum up all words from spams
                sum_spam_records+= int(factors[6]) ## sum up all emails labeled as spam
                sum_ham_count += int(factors[9]) ## sum up every time the word was found in a ham
                sum_ham_words += int(factors[12]) ## sum up all words from hams
                sum_ham_records += int(factors[15]) ## sum up all emails labeled as ham
                sum_words += int(factors[18]) ## sum all words from all emails
                sum_records += int(factors[21]) ## sum all emails
prior_spam = float(sum_spam_records)/float(sum_records) ## prior prob of a spam email
prior_ham = float(sum_ham_records)/float(sum_records) ## prior prob of a ham email
prob_word_spam = float(sum_spam_count)/float(sum_spam_words)## prob of word given that email is spam
prob_word_ham = float(sum_ham_count)/float(sum_ham_words) ## prob of word given that email is ham
##check_prior = prior_spam + prior_ham ## check priors -> sum to 1
##check_words = float(sum_words)/float(sum_spam_words+sum_ham_words) ## check probabilities of a word -> sum to 1
##check_spam = prob_word_spam*float(sum_spam_words)/float(sum_spam_count) ## check spam counts -> sum to 1
##check_ham = prob_word_ham*float(sum_ham_words)/float(sum_ham_count) ## check ham count -> sum to 1
sum_count = sum_spam_count+sum_ham_count
print "Summary of Data"
print '%4s'%sum_records ,'emails examined, containing %6s'%sum_words, 'words, we found %3s'%sum_count ,'matches.'
print '%30s' %'ID', '%10s' %'TRUTH', '%10s' %'CLASS', '%20s' %'CUMULATIVE ACCURACY'
## Second pass: classify every record with the corpus-level statistics.
miss, sample_size = 0,0
for file in filenames:
    with open (file, "r") as myfile:
        for line in myfile.readlines():
            if line.strip():
                data = re.split(" ", line)
                record_id = data[24]
                y_true = int(data[27][0])
                ## Total matches in this email: spam-column + ham-column
                ## (exactly one of the two is nonzero per record).
                count = int(data[0]) + int(data[9])
                ## Multinomial NB with one feature: P(class) * P(word|class)^count
                p_spam = prior_spam*prob_word_spam**count
                p_ham = prior_ham*prob_word_ham**count
                if p_spam > p_ham:
                    y_pred = 1
                else:
                    y_pred = 0
                if y_pred != y_true:
                    miss+= 1.0
                sample_size += 1.0
                accuracy = ((sample_size-miss)/sample_size)*100
                print '%30s' %record_id, '%10s' %y_true, '%10s' %y_pred, '%18.2f %%' % accuracy
!chmod +x reducer.py
```
# Write script to file
```
%%writefile pNaiveBayes.sh
## pNaiveBayes.sh
## Author: Jake Ryland Williams
## Usage: pNaiveBayes.sh m wordlist
## Input:
## m = number of processes (maps), e.g., 4
## wordlist = a space-separated list of words in quotes, e.g., "the and of"
##
## Same map-reduce driver as in HW1.2; only the final reporting echoes
## differ (here the reducer's multi-line classification table is printed).
##
## Instructions: Read this script and its comments closely.
## Do your best to understand the purpose of each command,
## and focus on how arguments are supplied to mapper.py/reducer.py,
## as this will determine how the python scripts take input.
## When you are comfortable with the unix code below,
## answer the questions on the LMS for HW1 about the starter code.
## collect user input
m=$1 ## the number of parallel processes (maps) to run
wordlist=$2 ## if set to "*", then all words are used
## a test set data of 100 messages
data="enronemail_1h.txt"
## the full set of data (33746 messages)
# data="enronemail.txt"
## 'wc' determines the number of lines in the data
## 'perl -pe' regex strips the piped wc output to a number
linesindata=`wc -l $data | perl -pe 's/^.*?(\d+).*?$/$1/'`
## determine the lines per chunk for the desired number of processes
linesinchunk=`echo "$linesindata/$m+1" | bc`
## split the original file into chunks by line
split -l $linesinchunk $data $data.chunk.
## assign python mappers (mapper.py) to the chunks of data
## and emit their output to temporary files
for datachunk in $data.chunk.*; do
## feed word list to the python mapper here and redirect STDOUT to a temporary file on disk
####
#### (student-completed) run the mapper on this chunk in the background
./mapper.py $datachunk "$wordlist" > $datachunk.counts &
####
####
done
## wait for the mappers to finish their work
wait
## 'ls' makes a list of the temporary count files
## 'perl -pe' regex replaces line breaks with spaces
countfiles=`\ls $data.chunk.*.counts | perl -pe 's/\n/ /'`
## feed the list of countfiles to the python reducer and redirect STDOUT to disk
####
#### (student-completed) merge all chunk outputs into the final report
./reducer.py $countfiles > $data.output
####
####
numOfInstances=$(cat $data.output)
echo "NB Classifier based on word(s): $wordlist" ## Print out words
echo "$numOfInstances" ## Print out output data
## clean up the data chunks and temporary count files
\rm $data.chunk.*
```
# Run file
```
!./pNaiveBayes.sh 5 "assistance"
```
# <----------------------------------End of HW1.3------------------------------------->
<h2>HW1.4.</h2> Provide a mapper/reducer pair that, when executed by pNaiveBayes.sh
will classify the email messages by a list of one or more user-specified words. Examine the words “assistance”, “valium”, and “enlargementWithATypo” and report your results
# Run file
```
!./pNaiveBayes.sh 5 "assistance valium enlargementWithATypo"
```
# <----------------------------------End of HW1.4------------------------------------->
# <----------------------------------End of HW1------------------------------------->
| github_jupyter |
# Section 1: Preprocessing
## Behavior Analysis
### Generate trial regressors
```
import os
import numpy as np
from pandas import concat, read_csv
from scipy.stats import gamma
def normalize(arr):
    """Min-max scale `arr` linearly onto [0, 1] (min -> 0, max -> 1)."""
    lo = arr.min()
    span = arr.max() - lo
    return (arr - lo) / span
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT/behavior'
subjects = ['BRTU', 'CHDR', 'CRDA', 'JADE', 'JASE', 'M5', 'MEWA', 'S2']
threshold = 0.005  # two-sided tail probability used for Gamma-based RT outliers below
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load / Concatenate / Prepare Data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
df = []
for subject in subjects:
    ## Load CSV.
    csv = read_csv(os.path.join(root_dir,'%s_msit_data.txt' %subject))
    ## Limit columns.
    csv = csv[['SubjID','trial','iaps','DBS','interference','valence','arousal','responseTime','responseCorrect']]
    ## Rename columns.
    csv.columns = ['Subject', 'Trial', 'IAPS', 'DBS', 'Interference', 'Valence_Obj', 'Arousal_Obj', 'RT', 'Accuracy']
    ## Load IAPS ratings (per-subject subjective valence/arousal).
    iaps = read_csv(os.path.join(root_dir,'%s_IAPS_SAM.csv' %subject))
    iaps = iaps[['IAPS_Number','Valence','Arousal']]
    iaps.columns = ['IAPS','Valence_Subj','Arousal_Subj']
    ## Merge. Append.
    csv = csv.merge(iaps, on='IAPS')
    cols = ['Subject', 'Trial', 'IAPS', 'DBS', 'Interference', 'Valence_Obj', 'Arousal_Obj',
            'Valence_Subj', 'Arousal_Subj', 'RT', 'Accuracy']
    csv = csv[cols]
    df.append(csv)
## Merge data. Sort. DBS becomes a 0/1 indicator (0 = 'DBSoff').
df = concat(df)
df['DBS'] = np.where(df['DBS']=='DBSoff',0,1)
df = df.sort_values(['Subject','DBS','Trial']).reset_index(drop=True)
## Normalize regressors (min-max onto [0, 1] across all subjects pooled).
df['nsArousal'] = normalize(df.Arousal_Subj)
df['nsValence'] = normalize(df.Valence_Subj)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Determine Trials for Inclusion/Exclusion.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Set missing RTs to NaNs.
df['RT'] = np.where(df.Accuracy==-1, np.nan, df.RT)
df['Accuracy'] = np.where(df.Accuracy==-1, np.nan, df.Accuracy)
df['Missing'] = df.Accuracy.isnull().astype(int)
## Add Error column.
df['Error'] = 1 - df.Accuracy
## Add Post-Error Column.
df['PostError'] = 0
for subject in df.Subject.unique():
error = df.loc[df.Subject==subject,'Error']
posterror = np.insert(np.roll(error,1)[1:], 0, 0)
df.loc[df.Subject==subject,'PostError'] = posterror
## Iteratively detect outliers across subjects by fitting a Gamma distribution.
df['GammaCDF'], df['Outlier'] = 0, 0
for subject in df.Subject.unique():
## Fit Gamma to reaction time distribution.
shape, loc, scale = gamma.fit(df.loc[(df.Subject==subject)&(~df.RT.isnull()),'RT'], floc=0)
## Find outliers given likelihood threshold.
cdf = gamma.cdf(df.loc[(df.Subject==subject)&(~df.RT.isnull()),'RT'], shape, loc=loc, scale=scale)
outliers = (cdf < threshold) | (cdf > 1 - threshold)
## Append information.
df.loc[(df.Subject==subject)&(~df.RT.isnull()), 'GammaCDF'] += cdf
df.loc[(df.Subject==subject)&(~df.RT.isnull()), 'Outlier'] += outliers.astype(int)
## Generate exclude.
df['Exclude'] = np.where( df[['Missing','Error','PostError','Outlier']].sum(axis=1), 1, 0)
print '%s trials (%0.2f%%) excluded.' %(df.Exclude.sum(), df.Exclude.mean())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Save.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
df.to_csv('%s/afMSIT_group_data.csv' %root_dir, index=False)
```
## Parcellation
### Make EMOTE Labels
```
import os, shutil
import numpy as np
import pylab as plt
from mne import read_label, read_source_spaces, read_surface, set_log_level
set_log_level(verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
fs_dir = '/space/sophia/2/users/EMOTE-DBS/freesurfs'
subject = 'BRTU'
parc = 'laus250'  # source parcellation to merge ROI labels from
label_dir = os.path.join(fs_dir,subject,'label',parc)
out_dir = os.path.join(fs_dir,subject,'label','april2016')
# Start from a clean output directory.
if os.path.isdir(out_dir): shutil.rmtree(out_dir)
os.makedirs(out_dir)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Build Left Hemisphere Labels.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
hemi = 'lh'
rr, _ = read_surface(os.path.join(fs_dir, subject, 'surf', '%s.inflated' %hemi))
src = read_source_spaces(os.path.join(fs_dir, subject, 'bem', '%s-oct-6-src.fif' %subject))[0]
# ROI name -> constituent laus250 parcellation labels to merge.
lhdict = {'dlpfc_1-lh':['caudalmiddlefrontal_1', 'caudalmiddlefrontal_5', 'caudalmiddlefrontal_6'],
          'dlpfc_2-lh':['caudalmiddlefrontal_2', 'caudalmiddlefrontal_3', 'caudalmiddlefrontal_4'],
          'dlpfc_3-lh':['rostralmiddlefrontal_2', 'rostralmiddlefrontal_3'],
          'dlpfc_4-lh':['rostralmiddlefrontal_1', 'rostralmiddlefrontal_5'],
          'dlpfc_5-lh':['parstriangularis_2', 'parsopercularis_2'],
          'dlpfc_6-lh':['parsopercularis_3', 'parsopercularis_4'],
          'racc-lh':['rostralanteriorcingulate_1','rostralanteriorcingulate_2'],
          'dacc-lh':['caudalanteriorcingulate_1','caudalanteriorcingulate_2',],
          'pcc-lh':['posteriorcingulate_2','posteriorcingulate_3']}
for k,V in lhdict.iteritems():
    ## Merge constituent labels into one ROI label (np.sum concatenates Labels).
    label = np.sum([read_label(os.path.join(label_dir,'%s-%s.label' %(v,hemi)), subject=subject)
                    for v in V])
    ## Report how many source-space vertices the merged label covers.
    n_vert = np.intersect1d(src['vertno'], label.vertices).shape[0]
    print '%s\t%s' %(n_vert,k)
    label.save(os.path.join(out_dir, '%s.label' %k))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Build Right Hemisphere Labels.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
hemi = 'rh'
rr, _ = read_surface(os.path.join(fs_dir, subject, 'surf', '%s.inflated' %hemi))
src = read_source_spaces(os.path.join(fs_dir, subject, 'bem', '%s-oct-6-src.fif' %subject))[1]
# Same ROI scheme as the left hemisphere; constituent labels differ slightly.
rhdict = {'dlpfc_1-rh':['caudalmiddlefrontal_1', 'caudalmiddlefrontal_2', 'caudalmiddlefrontal_5'],
          'dlpfc_2-rh':['caudalmiddlefrontal_3', 'caudalmiddlefrontal_4'],
          'dlpfc_3-rh':['rostralmiddlefrontal_2', 'rostralmiddlefrontal_3'],
          'dlpfc_4-rh':['rostralmiddlefrontal_1', 'rostralmiddlefrontal_5'],
          'dlpfc_5-rh':['parstriangularis_2', 'parsopercularis_1'],
          'dlpfc_6-rh':['parsopercularis_3', 'parsopercularis_4'],
          'racc-rh':['rostralanteriorcingulate_1','rostralanteriorcingulate_2'],
          'dacc-rh':['caudalanteriorcingulate_1','caudalanteriorcingulate_2','caudalanteriorcingulate_3'],
          'pcc-rh':['posteriorcingulate_2','posteriorcingulate_3']}
for k,V in rhdict.iteritems():
    label = np.sum([read_label(os.path.join(label_dir,'%s-%s.label' %(v,hemi)), subject=subject)
                    for v in V])
    n_vert = np.intersect1d(src['vertno'], label.vertices).shape[0]
    print '%s\t%s' %(n_vert,k)
    label.save(os.path.join(out_dir, '%s.label' %k))
```
## Preprocesing 1: Raw Data
### Fixing MEWA: Digitization
Something got way messed up. Here we make sure MNE knows what is EEG and what are extra points.
NOTE: Copied over one of the original files for MEWA and renamed it MEWA_msit_unmasked_raw.fif
```
import os
import numpy as np
from mne.io import Raw
from pandas import read_table
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Specify parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT_april2016'
raw_file = 'MEWA_msit_unmasked_raw.fif'
out_file = 'MEWA_msit_raw.fif'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare digitizations.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load data. Get digitization from raw.
raw = Raw(os.path.join(root_dir,'raw',raw_file),preload=False,verbose=False)
digitization = raw.info['dig']
## The last 101 points are extra. Set them to kind=4.
for d in digitization[-101:]: d['kind'] = 4
## Get coordinates for EEG points (excluding ref/EOG).
## BUG FIX: the original iterated over the undefined name `dig` (NameError);
## the list holding the digitization points is `digitization`.
rr = np.array([d['r'] for d in digitization if d['kind']==3])[:-2]
## Get channels
chs = raw.info['chs']
## Update location information. This was a huge pain in the ass to figure out.
## We ignore the first four channels (Triggers, EOG) and the last channel (STI014).
for ch, r in zip(chs[4:-1], rr): ch['loc'][:3] = r
## Update digitization/chs.
raw.info['dig'] = digitization
raw.info['chs'] = chs
raw.save(os.path.join(root_dir,'raw',out_file), overwrite=True)
```
### Fixing MEWA: Masking channel jumps
Time windows were manually inspected. This step isn't strictly necessary but seemed to help with EOG projections.
NOTE: Copied over one of the original files for MEWA and renamed it MEWA_msit_unmasked_raw.fif
```
import os
import numpy as np
import pylab as plt
from mne.io import Raw, RawArray
## Specify parameters.
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT_april2016'
raw_file = 'MEWA_msit_unmasked_raw.fif'
## Load data.
raw = Raw(os.path.join(root_dir,'raw',raw_file),preload=True,verbose=False)
## Get data in matrix form.
# NOTE(review): `_data` is a private MNE attribute; ties this script to
# the installed MNE version.
data = raw._data
## Get list of usable channels
ch_info = [(n,ch) for n,ch in enumerate(raw.ch_names)]
good_ch = [(n,ch) for n,ch in ch_info if ch not in raw.info['bads']]
# Drop the first 4 and the last channel — presumably triggers/EOG and
# STI014, as in the digitization script above; TODO confirm.
good_ch = np.array(good_ch)[4:-1]
## Make mask covering the manually-inspected jump windows (in seconds).
mask = np.zeros(data.shape[1])
times = [(384,394), (663,669)]
for t1, t2 in times:
    mask[(raw.times >= t1) & (raw.times <= t2)] += 1
mask = mask.astype(bool)
## Apply mask: zero out the flagged samples on every good channel.
for ch in good_ch[:,0].astype(int):
    data[ch,mask] = 0
## Make new array. Save.
raw = RawArray(data, raw.info, first_samp=raw.first_samp)
raw.add_eeg_average_proj()
raw.save(os.path.join(root_dir,'raw','MEWA_msit_raw.fif'), overwrite=True, verbose=False)
```
### Projections: EOG
```
import os
from mne import write_proj
from mne.preprocessing import compute_proj_eog
from mne.io import Raw
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Setup
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## File params.
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT_april2016'
subjects = ['BRTU', 'CHDR', 'CRDA', 'JADE', 'JASE', 'M5', 'MEWA', 'S2']
## NOTE(review): the full subject list is immediately overridden — only
## MEWA is processed here. Remove the next line to run all subjects.
subjects = ['MEWA']
# NOTE: Not all subjects work with EOG channel = EOG.
# Some require other frontal channels due to concatenation.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main Loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for subj in subjects:
    print 'Making EOG file for %s.' %subj
    ## Load files.
    raw_file = os.path.join( root_dir, 'raw', '%s_msit_raw.fif' %subj )
    raw = Raw(raw_file, preload=True, verbose=False, add_eeg_ref=False)
    raw.del_proj(0)
    ## Make EOG proj (F2 used as the blink-reference channel here). Save.
    proj, _ = compute_proj_eog(raw, n_eeg = 4, average=True, filter_length='20s',
                               reject=dict(eeg=5e-4), flat=dict(eeg=5e-8), ch_name='F2', n_jobs=3)
    write_proj(os.path.join( root_dir, 'raw', '%s_msit_eog-proj.fif' %subj ), proj)
### Projections: ECG
```
import os
from mne import read_proj, write_proj
from mne.preprocessing import compute_proj_ecg
from mne.io import Raw
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Setup
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## File params.
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT_april2016'
subjects = ['CHDR']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main Loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for subj in subjects:
    print 'Making ECG file for %s.' %subj
    ## Load files. Apply the existing EOG projections before fitting ECG.
    ## NOTE(review): this reads '%s_msit-proj.fif' while the EOG step above
    ## writes '%s_msit_eog-proj.fif' — confirm which filename is current.
    raw_file = os.path.join( root_dir, 'raw', '%s_msit_raw.fif' %subj )
    eog_file = os.path.join( root_dir, 'raw', '%s_msit-proj.fif' %subj )
    raw = Raw(raw_file, preload=True, verbose=False)
    eog_proj = read_proj(eog_file)
    raw.add_proj(eog_proj, remove_existing=True)
    raw.apply_proj()
    ## Make ECG proj (P9 used as the cardiac-reference channel). Save the
    ## combined EOG + (non-duplicate) ECG projections.
    ecg_proj, _ = compute_proj_ecg(raw, n_eeg = 4, h_freq = 35., average=True, filter_length='20s',
                                   reject=dict(eeg=5e-4), flat=dict(eeg=5e-8), ch_name='P9', n_jobs=3)
    proj = eog_proj + [ecg for ecg in ecg_proj if ecg['desc'] not in [eog['desc'] for eog in eog_proj]]
    write_proj(os.path.join( root_dir, 'raw', '%s_msit-proj.fif' %subj ), proj)
```
## Preprocessing 2: Epoching
### Make Forward Solutions
```
## Build an EEG-only forward solution per subject from the FreeSurfer-derived
## source space, BEM, and coregistration transform (Python 2 / legacy MNE).
import os
from mne import read_trans, read_bem_solution, read_source_spaces
from mne import make_forward_solution, write_forward_solution
from mne.io import Raw
## Subject level parameters.
subjects = ['BRTU', 'CHDR', 'CRDA', 'JADE', 'JASE', 'M5', 'MEWA', 'S2']
task = 'msit'  # NOTE(review): unused here (filenames hardcode 'msit'); kept for parity with the other scripts.
## Main loop.
root_dir = '/autofs/space/sophia_002/users/EMOTE-DBS/afMSIT_april2016'
fs_dir = '/autofs/space/sophia_002/users/EMOTE-DBS/freesurfs'
for subject in subjects:
    print 'Making forward solution for %s.' %subject
    ## Load files: raw info (sensor locations), MRI<->head transform, source space, BEM.
    raw = Raw(os.path.join(root_dir, 'raw', '%s_msit_raw.fif' %subject), preload=False, verbose=False)
    trans = read_trans(os.path.join(fs_dir,subject,'mri','T1-neuromag','sets','COR-%s.fif' %subject))
    src = read_source_spaces(os.path.join(fs_dir,subject,'bem','%s-oct-6p-src.fif' %subject), verbose=False)
    bem = read_bem_solution(os.path.join(fs_dir,subject,'bem','%s-5120-5120-5120-bem-sol.fif' %subject), verbose=False)
    ## Compute and save forward solution (old MNE API: fname= writes directly).
    make_forward_solution(raw.info, trans, src, bem, fname=os.path.join(root_dir,'fwd','%s_msit-fwd.fif' %subject),
                          meg=False, eeg=True, mindist=1.0, overwrite=True, n_jobs=3, verbose=False)
print 'Done.'
```
### Make Epochs
```
## Epoching pipeline (Python 2 / legacy MNE): for each subject, load raw EEG +
## behavioral CSV, recode events by DBS x interference condition, band-pass
## filter, build stimulus-locked and response-locked epochs, and save all three
## artifacts (stim epochs, resp epochs, trimmed CSV).
import os
import numpy as np
from mne import compute_covariance, Epochs, EpochsArray, find_events, read_proj, pick_types, set_log_level
from mne.io import Raw
from pandas import read_csv
set_log_level(verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Subject level parameters.
subjects = ['BRTU', 'CHDR', 'CRDA', 'JADE', 'JASE', 'M5', 'MEWA', 'S2']
task = 'msit'
## Filtering parameters.
l_freq = 0.5
h_freq = 50
l_trans_bandwidth = l_freq / 2.
h_trans_bandwidth = 1.0
filter_length = '20s'
n_jobs = 3
## Epoching parameters.
event_id = dict( FN=1, FI=2, NN=3, NI=4 ) # Alik's convention, isn't he smart!?
tmin = -1.5 # Leave some breathing room.
tmax = 3.4 # Trial is 1900ms, leave 1500ms of room.
resp_buffer = 1.5 # 1500ms on either side of response.
baseline = (-0.5,-0.1)
reject_tmin = -0.5
reject_tmax = 1.9
reject = dict(eeg=150e-6)  # peak-to-peak amplitude rejection threshold
flat = dict(eeg=5e-7)      # flat-signal rejection threshold
detrend = None
decim = 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load behavior.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
root_dir = '/space/sophia/2/users/EMOTE-DBS/afMSIT'
data_file = os.path.join( root_dir, 'behavior', 'afMSIT_group_data.csv' )
df = read_csv(data_file)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop over subjects.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for subj in subjects:
    print 'Loading data for %s.' %subj
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load data.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    # Define paths.
    raw_file = os.path.join( root_dir, 'raw', '%s_%s_raw.fif' %(subj,task) )
    proj_file = os.path.join( root_dir, 'raw', '%s_%s-proj.fif' %(subj,task) )
    # Load data.
    raw = Raw(raw_file,preload=True,verbose=False)
    proj = read_proj(proj_file)
    ## Add projections (drop any reference projectors, then re-add an EEG average reference).
    proj = [p for p in proj if 'ref' not in p['desc']]
    raw.add_proj(proj, remove_existing=True)
    raw.add_eeg_average_proj()
    raw.apply_proj()
    print raw.info['projs']
    ## Reduce dataframe to subject.
    data = df[df.Subject==subj]
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Make events.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print 'Identifying events for %s.' %subj,
    events = find_events(raw, stim_channel='Trig1', output='onset', min_duration=0.25, verbose=False)
    # Error catching: the trigger channel must yield exactly one event per behavioral trial.
    if data.shape[0] != events.shape[0]: raise ValueError('Mismatching number of stimulus onsets!')
    print '%s events found.' %events.shape[0]
    # Update event identifiers: recode to 1..4 over the DBS (off/on) x Interference (no/yes) grid,
    # matching event_id = dict(FN=1, FI=2, NN=3, NI=4).
    n = 1
    for dbs in [0,1]:
        for cond in [0,1]:
            ix, = np.where((data.DBS==dbs)&(data.Interference==cond))
            events[ix,-1] = n
            n+=1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Filter
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print 'Applying bandpass filter to raw [%s, %s].' %(l_freq, h_freq)
    Fs = raw.info['sfreq']
    raw.filter(l_freq = l_freq, h_freq = h_freq, filter_length=filter_length, n_jobs=n_jobs,
               l_trans_bandwidth=l_trans_bandwidth, h_trans_bandwidth=h_trans_bandwidth)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Make stimulus-locked epochs.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    # Build initial epochs object.
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    epochs = Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax, baseline=baseline, picks=picks,
                    reject=reject, flat=flat, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
                    proj=True, detrend=detrend, decim=decim)
    # First round of rejections (amplitude/flatness).
    epochs.drop_bad() # Remove bad epochs.
    # NOTE(review): DataFrame.ix is removed in modern pandas; fine for the py2-era environment this ran in.
    copy = data.ix[[True if not log else False for log in epochs.drop_log]] # Update CSV based on rejections.
    '''NOTE: Making a new dataframe copy is just a shortcut for easy indexing between the Pandas
    DataFrame and the Epochs object. This is due to the three rounds of rejections being
    applied to the data (e.g. amplitude, behavior exclusion, equalization).'''
    # Drop epochs based on behavior.
    epochs.drop(copy.Exclude.astype(bool))
    data = data.ix[[True if not log else False for log in epochs.drop_log]]
    print '%s trials remain after rejections.' %(len(epochs))
    print epochs
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Make Response-locked epochs.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print 'Making response-locked epochs.'
    # Build response-locked events: shift each stimulus event by the trial's RT
    # (+0.4 s compensating for the MSIT stimulus lock).
    response_indices = raw.time_as_index(0.4 + data.RT) # Compensating for MSIT-lock.
    response_events = epochs.events.copy()
    response_events[:,0] = response_events[:,0] + response_indices
    # Get data.
    arr = epochs.get_data()
    times = epochs.times
    # Calculate lengths of response-locked epochs.
    response_times = data.RT + 0.4 # Compensating for MSIT-lock.
    response_windows = np.array([response_times-resp_buffer, response_times+resp_buffer]).T
    # Iteratively build epochs array: slice a +/- resp_buffer window around each response.
    trials = []
    for n in xrange(len(epochs)):
        mask = (times >= response_windows[n,0]) & (times <= response_windows[n,1])
        trials.append( arr[n,:,mask] )
    trials = np.array(trials).swapaxes(1,2)  # boolean-mask indexing moves time first; restore (trial, channel, time)
    # Finally, make epochs objects.
    resp_epochs = EpochsArray(trials, epochs.info, response_events, tmin=-resp_buffer, event_id=event_id,)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Save data.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print 'Saving epoch files.'
    epochs.save(os.path.join(root_dir,'ave','%s_%s_%s_stim-epo.fif' %(subj,task,h_freq)))
    resp_epochs.save(os.path.join(root_dir,'ave','%s_%s_%s_resp-epo.fif' %(subj,task,h_freq)))
    data.to_csv(os.path.join(root_dir,'ave','%s_%s_%s-epo.csv' %(subj,task,h_freq)), index=False)
    print '\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n'
print 'Done.'
```
### Make Covariance Matrices / Inverse Solutions / Morph Maps
```
import os
from mne import EpochsArray, read_epochs, read_forward_solution, set_log_level
from mne import compute_covariance, write_cov
from mne import compute_morph_matrix, read_source_spaces
from mne.filter import low_pass_filter
from mne.minimum_norm import make_inverse_operator, write_inverse_operator
from scipy.io import savemat
set_log_level(verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Subject level parameters.
subjects = ['BRTU', 'CHDR', 'CRDA', 'JADE', 'JASE', 'M5', 'MEWA', 'S2']
task = 'msit'
## Analysis parameters.
fmax = 50
## Source localization parameters.
loose = 0.2
depth = 0.8
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Iteratively load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
root_dir = '/autofs/space/sophia_002/users/EMOTE-DBS/afMSIT'
fs_dir = '/autofs/space/sophia_002/users/EMOTE-DBS/freesurfs'
src = read_source_spaces(os.path.join(fs_dir,'fscopy','bem','fscopy-oct-6p-src.fif'))
for subject in subjects:
print 'Processing %s' %subject
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load files.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load in files.
epo_file = os.path.join(root_dir,'ave','%s_msit_%s_stim-epo.fif' %(subject,fmax))
epochs = read_epochs(epo_file, verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Secondary objects.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
fwd = read_forward_solution(os.path.join(root_dir, 'fwd', '%s_%s-fwd.fif' %(subject,task)),
surf_ori=True, verbose=False)
## Compute/save noise covariance matrix & inverse operator.
noise_cov = compute_covariance(epochs, tmin=-0.5, tmax=0.0, method='shrunk', n_jobs=1)
write_cov(os.path.join(root_dir,'cov','%s_%s_%s-cov.fif' %(subject,task,h_freq)), noise_cov)
inv = make_inverse_operator(epochs.info, fwd, noise_cov, loose=loose, depth=depth, verbose=False)
write_inverse_operator(os.path.join(root_dir,'cov','%s_%s_%s-inv.fif' %(subject,task,fmax)), inv)
## Pre-compute morph matrix.
vertices_from = [inv['src'][n]['vertno'] for n in xrange(2)]
vertices_to = [src[n]['vertno'] for n in xrange(2)]
morph_mat = compute_morph_matrix(subject, 'fsaverage', vertices_from=vertices_from,
vertices_to=vertices_to,subjects_dir=fs_dir, smooth=25)
savemat(os.path.join(root_dir, 'morph_maps', '%s-fsaverage_morph.mat' %subject),
mdict=dict(morph_mat=morph_mat))
print 'Done.'
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 理解语言的 Transformer 模型
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://tensorflow.google.cn/tutorials/text/transformer">
<img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />
在 tensorflow.google.cn 上查看</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/text/transformer.ipynb">
<img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />
在 Google Colab 运行</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/text/transformer.ipynb">
<img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" />
在 Github 上查看源代码</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/text/transformer.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载此 notebook</a>
</td>
</table>
Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的
[官方英文文档](https://tensorflow.google.cn/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到
[tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入
[docs-zh-cn@tensorflow.org Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)
本教程训练了一个 <a href="https://arxiv.org/abs/1706.03762" class="external">Transformer 模型</a> 用于将葡萄牙语翻译成英语。这是一个高级示例,假定您具备[文本生成(text generation)](text_generation.ipynb)和 [注意力机制(attention)](nmt_with_attention.ipynb) 的知识。
Transformer 模型的核心思想是*自注意力机制(self-attention)*——能注意输入序列的不同位置以计算该序列的表示的能力。Transformer 创建了多层自注意力层(self-attention layers)组成的堆栈,下文的*按比缩放的点积注意力(Scaled dot product attention)*和*多头注意力(Multi-head attention)*部分对此进行了说明。
一个 transformer 模型用自注意力层而非 [RNNs](text_classification_rnn.ipynb) 或 [CNNs](../images/intro_to_cnns.ipynb) 来处理变长的输入。这种通用架构有一系列的优势:
* 它不对数据间的时间/空间关系做任何假设。这是处理一组对象(objects)的理想选择(例如,[星际争霸单位(StarCraft units)](https://deepmind.com/blog/alphastar-mastering-real-time-strategy-game-starcraft-ii/#block-8))。
* 层输出可以并行计算,而非像 RNN 这样的序列计算。
* 远距离项可以影响彼此的输出,而无需经过许多 RNN 步骤或卷积层(例如,参见[场景记忆 Transformer(Scene Memory Transformer)](https://arxiv.org/pdf/1903.03878.pdf))
* 它能学习长距离的依赖。在许多序列任务中,这是一项挑战。
该架构的缺点是:
* 对于时间序列,一个单位时间的输出是从*整个历史记录*计算的,而非仅从输入和当前的隐含状态计算得到。这*可能*效率较低。
* 如果输入*确实*有时间/空间的关系,像文本,则必须加入一些位置编码,否则模型将有效地看到一堆单词。
在此 notebook 中训练完模型后,您将能输入葡萄牙语句子,得到其英文翻译。
<img src="https://tensorflow.google.cn/images/tutorials/transformer/attention_map_portuguese.png" width="800" alt="Attention heatmap">
```
import tensorflow_datasets as tfds
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
```
## 设置输入流水线(input pipeline)
使用 [TFDS](https://tensorflow.google.cn/datasets) 来导入 [葡萄牙语-英语翻译数据集](https://github.com/neulab/word-embeddings-for-nmt),该数据集来自于 [TED 演讲开放翻译项目](https://www.ted.com/participate/translate).
该数据集包含约 50000 条训练样本,1100 条验证样本,以及 2000 条测试样本。
```
# Download/load the TED Talks pt->en translation dataset; as_supervised yields (pt, en) pairs.
examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,
                               as_supervised=True)
train_examples, val_examples = examples['train'], examples['validation']
```
从训练数据集创建自定义子词分词器(subwords tokenizer)。
```
# Build subword tokenizers (vocab ~2^13 = 8192) from the training corpus, one per language.
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    (en.numpy() for pt, en in train_examples), target_vocab_size=2**13)
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    (pt.numpy() for pt, en in train_examples), target_vocab_size=2**13)
# Sanity check: encode/decode must round-trip exactly.
sample_string = 'Transformer is awesome.'
tokenized_string = tokenizer_en.encode(sample_string)
print ('Tokenized string is {}'.format(tokenized_string))
original_string = tokenizer_en.decode(tokenized_string)
print ('The original string: {}'.format(original_string))
assert original_string == sample_string
```
如果单词不在词典中,则分词器(tokenizer)通过将单词分解为子词来对字符串进行编码。
```
# Show how each subword id maps back to text.
for ts in tokenized_string:
    print ('{} ----> {}'.format(ts, tokenizer_en.decode([ts])))
# Shuffle buffer and batch size for the tf.data pipeline below.
BUFFER_SIZE = 20000
BATCH_SIZE = 64
```
将开始和结束标记(token)添加到输入和目标。
```
def encode(lang1, lang2):
    """Tokenize a (pt, en) tensor pair and wrap each with start/end tokens.

    The start token is `vocab_size` and the end token is `vocab_size + 1`,
    i.e. two ids just past the subword vocabulary.
    Returns two Python lists of int token ids.
    """
    pt_start, pt_end = tokenizer_pt.vocab_size, tokenizer_pt.vocab_size + 1
    en_start, en_end = tokenizer_en.vocab_size, tokenizer_en.vocab_size + 1
    pt_ids = [pt_start] + tokenizer_pt.encode(lang1.numpy()) + [pt_end]
    en_ids = [en_start] + tokenizer_en.encode(lang2.numpy()) + [en_end]
    return pt_ids, en_ids
```
Note:为了使本示例较小且相对较快,删除长度大于40个标记的样本。
```
# Drop examples longer than this many tokens (keeps the tutorial small and fast).
MAX_LENGTH = 40

def filter_max_length(x, y, max_length=MAX_LENGTH):
    """Return a scalar bool tensor: True iff both sequences fit in max_length."""
    x_fits = tf.size(x) <= max_length
    y_fits = tf.size(y) <= max_length
    return tf.logical_and(x_fits, y_fits)
```
`.map()` 内部的操作以图模式(graph mode)运行,`.map()` 接收一个不具有 numpy 属性的图张量(graph tensor)。该`分词器(tokenizer)`需要将一个字符串或 Unicode 符号,编码成整数。因此,您需要在 `tf.py_function` 内部运行编码过程,`tf.py_function` 接收一个 eager 张量,该 eager 张量有一个包含字符串值的 numpy 属性。
```
def tf_encode(pt, en):
    """Graph-safe wrapper around `encode`.

    `.map()` runs in graph mode, where tensors have no `.numpy()`; tf.py_function
    executes `encode` eagerly so it can call the tokenizers. The shape info lost
    by py_function is restored with set_shape.
    """
    pt_ids, en_ids = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
    pt_ids.set_shape([None])  # variable-length 1-D token vector
    en_ids.set_shape([None])
    return pt_ids, en_ids
# Build the training pipeline: tokenize, drop over-long pairs, cache, shuffle, pad-batch, prefetch.
train_dataset = train_examples.map(tf_encode)
train_dataset = train_dataset.filter(filter_max_length)
# Cache the dataset in memory to speed up subsequent epochs.
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Validation pipeline: no shuffle/cache needed.
val_dataset = val_examples.map(tf_encode)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE)
# Peek at one padded validation batch.
pt_batch, en_batch = next(iter(val_dataset))
pt_batch, en_batch
```
## 位置编码(Positional encoding)
因为该模型并不包括任何的循环(recurrence)或卷积,所以模型添加了位置编码,为模型提供一些关于单词在句子中相对位置的信息。
位置编码向量被加到嵌入(embedding)向量中。嵌入表示一个 d 维空间的标记,在 d 维空间中有着相似含义的标记会离彼此更近。但是,嵌入并没有对在一句话中的词的相对位置进行编码。因此,当加上位置编码后,词将基于*它们含义的相似度以及它们在句子中的位置*,在 d 维空间中离彼此更近。
参看 [位置编码](https://github.com/tensorflow/examples/blob/master/community/en/position_encoding.ipynb) 的 notebook 了解更多信息。计算位置编码的公式如下:
$$\Large{PE_{(pos, 2i)} = sin(pos / 10000^{2i / d_{model}})} $$
$$\Large{PE_{(pos, 2i+1)} = cos(pos / 10000^{2i / d_{model}})} $$
```
def get_angles(pos, i, d_model):
    """Angle arguments for the sinusoidal positional encoding.

    Implements pos / 10000^(2*floor(i/2)/d_model); `pos` and `i` broadcast
    (typically column vs row vectors), so the result is a (positions, dims) grid.
    """
    inv_freq = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * inv_freq
def positional_encoding(position, d_model):
    """Sinusoidal positional-encoding table, shape (1, position, d_model).

    Even feature indices (2i) carry sin, odd indices (2i+1) carry cos, per
    "Attention Is All You Need". The leading batch axis lets it broadcast
    over a batch of embeddings.
    """
    positions = np.arange(position)[:, np.newaxis]   # (position, 1)
    dims = np.arange(d_model)[np.newaxis, :]         # (1, d_model)
    angle_rads = get_angles(positions, dims, d_model)
    # sin on even columns; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    # cos on odd columns; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    return tf.cast(angle_rads[np.newaxis, ...], dtype=tf.float32)
# Visualize the encoding table: 50 positions x 512 depth.
pos_encoding = positional_encoding(50, 512)
print (pos_encoding.shape)
plt.pcolormesh(pos_encoding[0], cmap='RdBu')
plt.xlabel('Depth')
plt.xlim((0, 512))
plt.ylabel('Position')
plt.colorbar()
plt.show()
```
## 遮挡(Masking)
遮挡一批序列中所有的填充标记(pad tokens)。这确保了模型不会将填充作为输入。该 mask 表明填充值 `0` 出现的位置:在这些位置 mask 输出 `1`,否则输出 `0`。
```
def create_padding_mask(seq):
    """Mask the pad tokens (id 0) in a batch of sequences.

    Returns a float tensor of shape (batch_size, 1, 1, seq_len): 1 where the
    input is padding, 0 elsewhere. The two singleton axes let the mask be
    added directly to the (batch, heads, seq_q, seq_k) attention logits.
    """
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return is_pad[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
# Demo: zeros in each row become 1s in the mask.
x = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
create_padding_mask(x)
```
前瞻遮挡(look-ahead mask)用于遮挡一个序列中的后续标记(future tokens)。换句话说,该 mask 表明了不应该使用的条目。
这意味着要预测第三个词,将仅使用第一个和第二个词。与此类似,预测第四个词,仅使用第一个,第二个和第三个词,依此类推。
```
def create_look_ahead_mask(size):
    """Mask future tokens: (seq_len, seq_len) with 1s strictly above the diagonal.

    Row i may attend only to positions <= i (1 marks a forbidden entry).
    """
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle  # (seq_len, seq_len)
# Demo: a 3x3 strictly-upper-triangular mask.
x = tf.random.uniform((1, 3))
temp = create_look_ahead_mask(x.shape[1])
temp
```
## 按比缩放的点积注意力(Scaled dot product attention)
<img src="https://tensorflow.google.cn/images/tutorials/transformer/scaled_attention.png" width="500" alt="scaled_dot_product_attention">
Transformer 使用的注意力函数有三个输入:Q(请求(query))、K(主键(key))、V(数值(value))。用于计算注意力权重的等式为:
$$\Large{Attention(Q, K, V) = softmax_k(\frac{QK^T}{\sqrt{d_k}}) V} $$
点积注意力被缩小了深度的平方根倍。这样做是因为对于较大的深度值,点积的大小会增大,从而推动 softmax 函数往仅有很小的梯度的方向靠拢,导致了一种很硬的(hard)softmax。
例如,假设 `Q` 和 `K` 的均值为0,方差为1。它们的矩阵乘积将有均值为0,方差为 `dk`。因此,*`dk` 的平方根*被用于缩放(而非其他数值),因为,`Q` 和 `K` 的矩阵乘积的均值本应该为 0,方差本应该为1,这样会获得一个更平缓的 softmax。
遮挡(mask)与 -1e9(接近于负无穷)相乘。这样做是因为遮挡与缩放的 Q 和 K 的矩阵乘积相加,并在 softmax 之前立即应用。目标是将这些单元归零,因为 softmax 的较大负数输入在输出中接近于零。
```
def scaled_dot_product_attention(q, k, v, mask):
    """Compute attention weights and the attended output.

    q, k, v must have matching leading dimensions, and k and v must agree
    on the second-to-last dimension (seq_len_k == seq_len_v). The mask may
    have different shapes depending on its type (padding or look-ahead),
    but it must broadcast against the logits for the addition.

    Args:
      q: queries, shape (..., seq_len_q, depth)
      k: keys, shape (..., seq_len_k, depth)
      v: values, shape (..., seq_len_v, depth_v)
      mask: float tensor broadcastable to (..., seq_len_q, seq_len_k), or None.

    Returns:
      (output, attention_weights)
    """
    raw_scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    # Scale by sqrt(depth) so large depths don't push softmax into saturation.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = raw_scores / tf.math.sqrt(depth)
    # Masked positions get a huge negative logit -> ~0 weight after softmax.
    if mask is not None:
        logits += (mask * -1e9)
    # Normalize over the key axis so each query's weights sum to 1.
    attention_weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
```
当 softmax 在 K 上进行归一化后,它的值决定了分配到 Q 的重要程度。
输出表示注意力权重和 V(数值)向量的乘积。这确保了要关注的词保持原样,而无关的词将被清除掉。
```
def print_out(q, k, v):
    """Run scaled_dot_product_attention without a mask and print weights + output."""
    temp_out, temp_attn = scaled_dot_product_attention(
        q, k, v, None)
    print ('Attention weights are:')
    print (temp_attn)
    print ('Output is:')
    print (temp_out)

np.set_printoptions(suppress=True)
temp_k = tf.constant([[10,0,0],
                      [0,10,0],
                      [0,0,10],
                      [0,0,10]], dtype=tf.float32)  # (4, 3)
temp_v = tf.constant([[ 1,0],
                      [ 10,0],
                      [ 100,5],
                      [1000,6]], dtype=tf.float32)  # (4, 2)
# This query aligns with the second key,
# so the second value is returned.
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32)  # (1, 3)
print_out(temp_q, temp_k, temp_v)
# This query aligns with the repeated keys (third and fourth),
# so the matching values are averaged.
temp_q = tf.constant([[0, 0, 10]], dtype=tf.float32)  # (1, 3)
print_out(temp_q, temp_k, temp_v)
# This query aligns with the first and second keys equally,
# so their values are averaged.
temp_q = tf.constant([[10, 10, 0]], dtype=tf.float32)  # (1, 3)
print_out(temp_q, temp_k, temp_v)
```
将所有请求一起*传递*。
```
# All three queries at once: each row attends independently.
temp_q = tf.constant([[0, 0, 10], [0, 10, 0], [10, 10, 0]], dtype=tf.float32)  # (3, 3)
print_out(temp_q, temp_k, temp_v)
```
## 多头注意力(Multi-head attention)
<img src="https://tensorflow.google.cn/images/tutorials/transformer/multi_head_attention.png" width="500" alt="multi-head attention">
多头注意力由四部分组成:
* 线性层并分拆成多头。
* 按比缩放的点积注意力。
* 多头级联。
* 最后一层线性层。
每个多头注意力块有三个输入:Q(请求)、K(主键)、V(数值)。这些输入经过线性(Dense)层,并分拆成多头。
将上面定义的 `scaled_dot_product_attention` 函数应用于每个头(进行了广播(broadcasted)以提高效率)。注意力这步必须使用一个恰当的 mask。然后将每个头的注意力输出连接起来(用`tf.transpose` 和 `tf.reshape`),并放入最后的 `Dense` 层。
Q、K、和 V 被拆分到了多个头,而非单个的注意力头,因为多头允许模型共同注意来自不同表示空间的不同位置的信息。在分拆后,每个头部的维度减少,因此总的计算成本与有着全部维度的单个注意力头相同。
```
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project Q/K/V, split into heads, run scaled
    dot-product attention per head, concatenate, and apply a final Dense layer."""

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # d_model must divide evenly across the heads.
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads  # per-head feature size
        # Linear projections for queries, keys, and values.
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        # Output projection applied after the heads are concatenated.
        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth) and transpose the
        result so the shape is (batch_size, num_heads, seq_len, depth)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        batch_size = tf.shape(q)[0]
        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)
        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)
        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)
        # Undo the head split: transpose then reshape back to (batch, seq, d_model).
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)
        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)
        return output, attention_weights
```
创建一个 `MultiHeadAttention` 层进行尝试。在序列中的每个位置 `y`,`MultiHeadAttention` 在序列中的所有其他位置运行所有8个注意力头,在每个位置y,返回一个新的同样长度的向量。
```
# Self-attention demo: using y as Q, K, and V preserves the sequence length.
temp_mha = MultiHeadAttention(d_model=512, num_heads=8)
y = tf.random.uniform((1, 60, 512))  # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
out.shape, attn.shape
```
## 点式前馈网络(Point wise feed forward network)
点式前馈网络由两层全联接层组成,两层之间有一个 ReLU 激活函数。
```
def point_wise_feed_forward_network(d_model, dff):
    """Two fully-connected layers with a ReLU in between, applied per position.

    Expands to `dff` units, then projects back to `d_model`.
    """
    expand = tf.keras.layers.Dense(dff, activation='relu')  # (batch_size, seq_len, dff)
    project = tf.keras.layers.Dense(d_model)                # (batch_size, seq_len, d_model)
    return tf.keras.Sequential([expand, project])
# Demo: output keeps (batch, seq_len) and restores d_model on the last axis.
sample_ffn = point_wise_feed_forward_network(512, 2048)
sample_ffn(tf.random.uniform((64, 50, 512))).shape
```
## 编码与解码(Encoder and decoder)
<img src="https://tensorflow.google.cn/images/tutorials/transformer/transformer.png" width="600" alt="transformer">
Transformer 模型与标准的[具有注意力机制的序列到序列模型(sequence to sequence with attention model)](nmt_with_attention.ipynb),遵循相同的一般模式。
* 输入语句经过 `N` 个编码器层,为序列中的每个词/标记生成一个输出。
* 解码器关注编码器的输出以及它自身的输入(自注意力)来预测下一个词。
### 编码器层(Encoder layer)
每个编码器层包括以下子层:
1. 多头注意力(有填充遮挡)
2. 点式前馈网络(Point wise feed forward networks)。
每个子层在其周围有一个残差连接,然后进行层归一化。残差连接有助于避免深度网络中的梯度消失问题。
每个子层的输出是 `LayerNorm(x + Sublayer(x))`。归一化是在 `d_model`(最后一个)维度完成的。Transformer 中有 N 个编码器层。
```
class EncoderLayer(tf.keras.layers.Layer):
    """One Transformer encoder layer: multi-head self-attention followed by a
    point-wise feed-forward network, each wrapped in dropout + residual + layer norm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        # Self-attention sub-layer (mask is the padding mask).
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)
        # Feed-forward sub-layer.
        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)
        return out2
# Demo: an encoder layer preserves the (batch, seq, d_model) shape.
sample_encoder_layer = EncoderLayer(512, 8, 2048)
sample_encoder_layer_output = sample_encoder_layer(
    tf.random.uniform((64, 43, 512)), False, None)
sample_encoder_layer_output.shape  # (batch_size, input_seq_len, d_model)
```
### 解码器层(Decoder layer)
每个解码器层包括以下子层:
1. 遮挡的多头注意力(前瞻遮挡和填充遮挡)
2. 多头注意力(用填充遮挡)。V(数值)和 K(主键)接收*编码器输出*作为输入。Q(请求)接收*遮挡的多头注意力子层的输出*。
3. 点式前馈网络
每个子层在其周围有一个残差连接,然后进行层归一化。每个子层的输出是 `LayerNorm(x + Sublayer(x))`。归一化是在 `d_model`(最后一个)维度完成的。
Transformer 中共有 N 个解码器层。
当 Q 接收到解码器的第一个注意力块的输出,并且 K 接收到编码器的输出时,注意力权重表示根据编码器的输出赋予解码器输入的重要性。换一种说法,解码器通过查看编码器输出和对其自身输出的自注意力,预测下一个词。参看按比缩放的点积注意力部分的演示。
```
class DecoderLayer(tf.keras.layers.Layer):
    """One Transformer decoder layer: masked self-attention, encoder-decoder
    attention, then a point-wise feed-forward network; each sub-layer is wrapped
    in dropout + residual + layer norm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
        self.mha2 = MultiHeadAttention(d_model, num_heads)  # attention over encoder output
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        # Masked self-attention over the target sequence.
        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)
        # Encoder-decoder attention: K and V come from the encoder output, Q from out1.
        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask)  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)  # (batch_size, target_seq_len, d_model)
        # Feed-forward sub-layer.
        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(ffn_output + out2)  # (batch_size, target_seq_len, d_model)
        return out3, attn_weights_block1, attn_weights_block2
# Demo: decoder layer consumes the encoder layer's output from the cell above.
sample_decoder_layer = DecoderLayer(512, 8, 2048)
sample_decoder_layer_output, _, _ = sample_decoder_layer(
    tf.random.uniform((64, 50, 512)), sample_encoder_layer_output,
    False, None, None)
sample_decoder_layer_output.shape  # (batch_size, target_seq_len, d_model)
```
### 编码器(Encoder)
`编码器` 包括:
1. 输入嵌入(Input Embedding)
2. 位置编码(Positional Encoding)
3. N 个编码器层(encoder layers)
输入经过嵌入(embedding)后,该嵌入与位置编码相加。该加法结果的输出是编码器层的输入。编码器的输出是解码器的输入。
```
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder: input embedding + positional encoding feeding a
    stack of `num_layers` EncoderLayer blocks."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding,
                                                self.d_model)
        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        seq_len = tf.shape(x)[1]
        # Sum the token embedding and the positional encoding.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        # Scale embeddings by sqrt(d_model) before adding positions.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        # Pass through the stacked encoder layers.
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)
        return x  # (batch_size, input_seq_len, d_model)
# Demo: encode a random (batch=64, seq=62) id batch.
sample_encoder = Encoder(num_layers=2, d_model=512, num_heads=8,
                         dff=2048, input_vocab_size=8500,
                         maximum_position_encoding=10000)
sample_encoder_output = sample_encoder(tf.random.uniform((64, 62)),
                                       training=False, mask=None)
print (sample_encoder_output.shape)  # (batch_size, input_seq_len, d_model)
```
### 解码器(Decoder)
`解码器`包括:
1. 输出嵌入(Output Embedding)
2. 位置编码(Positional Encoding)
3. N 个解码器层(decoder layers)
目标(target)经过一个嵌入后,该嵌入和位置编码相加。该加法结果是解码器层的输入。解码器的输出是最后的线性层的输入。
```
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder: output embedding + positional encoding feeding a
    stack of `num_layers` DecoderLayer blocks. Also collects each layer's
    attention weights for later visualization."""

    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        # Sum the token embedding (scaled by sqrt(d_model)) and the positional encoding.
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                   look_ahead_mask, padding_mask)
            # block1: self-attention weights; block2: encoder-decoder attention weights.
            attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
# Demo: decode a random target batch against the sample encoder output.
sample_decoder = Decoder(num_layers=2, d_model=512, num_heads=8,
                         dff=2048, target_vocab_size=8000,
                         maximum_position_encoding=5000)
output, attn = sample_decoder(tf.random.uniform((64, 26)),
                              enc_output=sample_encoder_output,
                              training=False, look_ahead_mask=None,
                              padding_mask=None)
output.shape, attn['decoder_layer2_block2'].shape
```
## 创建 Transformer
Transformer 包括编码器,解码器和最后的线性层。解码器的输出是线性层的输入,返回线性层的输出。
```
class Transformer(tf.keras.Model):
    """Full Transformer: encoder, decoder, and a final linear projection onto
    the target vocabulary (logits; softmax applied by the loss)."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)
        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target, rate)
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        enc_output = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask)
        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)
        return final_output, attention_weights
# Demo: a forward pass with no masks; output is per-token target-vocab logits.
sample_transformer = Transformer(
    num_layers=2, d_model=512, num_heads=8, dff=2048,
    input_vocab_size=8500, target_vocab_size=8000,
    pe_input=10000, pe_target=6000)
temp_input = tf.random.uniform((64, 62))
temp_target = tf.random.uniform((64, 26))
fn_out, _ = sample_transformer(temp_input, temp_target, training=False,
                               enc_padding_mask=None,
                               look_ahead_mask=None,
                               dec_padding_mask=None)
fn_out.shape  # (batch_size, tar_seq_len, target_vocab_size)
```
## 配置超参数(hyperparameters)
为了让本示例小且相对较快,已经减小了*num_layers、 d_model 和 dff* 的值。
Transformer 的基础模型使用的数值为:*num_layers=6*,*d_model = 512*,*dff = 2048*。关于所有其他版本的 Transformer,请查阅[论文](https://arxiv.org/abs/1706.03762)。
Note:通过改变以下数值,您可以获得在许多任务上达到最先进水平的模型。
```
# Reduced hyperparameters for the tutorial (base Transformer: 6 / 512 / 2048).
num_layers = 4
d_model = 128
dff = 512
num_heads = 8
# +2 accounts for the start/end tokens appended in `encode`.
input_vocab_size = tokenizer_pt.vocab_size + 2
target_vocab_size = tokenizer_en.vocab_size + 2
dropout_rate = 0.1
```
## 优化器(Optimizer)
根据[论文](https://arxiv.org/abs/1706.03762)中的公式,将 Adam 优化器与自定义的学习速率调度程序(scheduler)配合使用。
$$\Large{lrate = d_{model}^{-0.5} * min(step{\_}num^{-0.5}, step{\_}num * warmup{\_}steps^{-1.5})}$$
```
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule from "Attention Is All You Need":
    lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5),
    i.e. linear warmup for `warmup_steps` then inverse-sqrt decay."""

    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        self.d_model = d_model
        self.d_model = tf.cast(self.d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        arg1 = tf.math.rsqrt(step)                  # decay branch: step^-0.5
        arg2 = step * (self.warmup_steps ** -1.5)   # warmup branch: linear in step
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
temp_learning_rate_schedule = CustomSchedule(d_model)
plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
```
## 损失函数与指标(Loss and metrics)
由于目标序列是填充(padded)过的,因此在计算损失函数时,应用填充遮挡非常重要。
```
# Per-token cross-entropy; reduction='none' keeps one loss value per
# position so the padding mask below can zero out padded tokens.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')

def loss_function(real, pred):
    """Masked cross-entropy over non-padding positions.

    Padding tokens (label 0) are zeroed out, and the total is divided by
    the number of *real* tokens (sum of the mask). The original divided
    by the full padded length via tf.reduce_mean, which made the loss
    scale depend on how much padding each batch happened to contain.
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_sum(loss_) / tf.reduce_sum(mask)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
    name='train_accuracy')
```
## 训练与检查点(Training and checkpointing)
```
transformer = Transformer(num_layers, d_model, num_heads, dff,
input_vocab_size, target_vocab_size,
pe_input=input_vocab_size,
pe_target=target_vocab_size,
rate=dropout_rate)
def create_masks(inp, tar):
    """Build the three attention masks one training batch needs.

    Returns (enc_padding_mask, combined_mask, dec_padding_mask).
    """
    # Padding mask for the encoder's self-attention.
    enc_padding_mask = create_padding_mask(inp)
    # Used in the decoder's second attention block to mask the
    # encoder outputs at padded input positions.
    dec_padding_mask = create_padding_mask(inp)
    # The decoder's first attention block must attend neither to future
    # tokens nor to padding, so merge both masks element-wise.
    target_len = tf.shape(tar)[1]
    combined_mask = tf.maximum(create_padding_mask(tar),
                               create_look_ahead_mask(target_len))
    return enc_padding_mask, combined_mask, dec_padding_mask
```
创建检查点的路径和检查点管理器(manager)。这将用于在每 `n` 个周期(epochs)保存检查点。
```
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer,
optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# 如果检查点存在,则恢复最新的检查点。
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print ('Latest checkpoint restored!!')
```
目标(target)被分成了 tar_inp 和 tar_real。tar_inp 作为输入传递到解码器。`tar_real` 是位移了 1 的同一个输入:在 `tar_inp` 中的每个位置,`tar_real` 包含了应该被预测到的下一个标记(token)。
例如,`sentence` = "SOS A lion in the jungle is sleeping EOS"
`tar_inp` = "SOS A lion in the jungle is sleeping"
`tar_real` = "A lion in the jungle is sleeping EOS"
Transformer 是一个自回归(auto-regressive)模型:它一次作一个部分的预测,然后使用到目前为止的自身的输出来决定下一步要做什么。
在训练过程中,本示例使用了 teacher-forcing 的方法(就像[文本生成教程](./text_generation.ipynb)中一样)。无论模型在当前时间步骤下预测出什么,teacher-forcing 方法都会将真实的输出传递到下一个时间步骤上。
当 transformer 预测每个词时,*自注意力(self-attention)*功能使它能够查看输入序列中前面的单词,从而更好地预测下一个单词。
为了防止模型在期望的输出上达到峰值,模型使用了前瞻遮挡(look-ahead mask)。
```
EPOCHS = 20
# The @tf.function decorator trace-compiles train_step into a TF graph for
# faster execution. The traced function is specialized to the exact shapes
# of its argument tensors. To avoid re-tracing caused by variable sequence
# lengths or variable batch sizes (the last batch is smaller), use
# input_signature to declare more generic (None) shapes.
train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]
@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
"""One optimization step: forward pass, masked loss, backprop, metrics."""
# tar_inp feeds the decoder; tar_real is the same sequence shifted left
# by one, i.e. the token to predict at each position (teacher forcing).
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
with tf.GradientTape() as tape:
predictions, _ = transformer(inp, tar_inp,
True,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(tar_real, predictions)
```
葡萄牙语作为输入语言,英语为目标语言。
```
# Train for EPOCHS passes over the dataset, checkpointing every 5 epochs.
for epoch in range(EPOCHS):
start = time.time()
# Keras metrics accumulate across calls; reset at the start of each epoch.
train_loss.reset_states()
train_accuracy.reset_states()
# inp -> portuguese, tar -> english
for (batch, (inp, tar)) in enumerate(train_dataset):
train_step(inp, tar)
if batch % 50 == 0:
print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
epoch + 1, batch, train_loss.result(), train_accuracy.result()))
if (epoch + 1) % 5 == 0:
ckpt_save_path = ckpt_manager.save()
print ('Saving checkpoint for epoch {} at {}'.format(epoch+1,
ckpt_save_path))
print ('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
train_loss.result(),
train_accuracy.result()))
print ('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
```
## 评估(Evaluate)
以下步骤用于评估:
* 用葡萄牙语分词器(`tokenizer_pt`)编码输入语句。此外,添加开始和结束标记,这样输入就与模型训练的内容相同。这是编码器输入。
* 解码器输入为 `start token == tokenizer_en.vocab_size`。
* 计算填充遮挡和前瞻遮挡。
* `解码器`通过查看`编码器输出`和它自身的输出(自注意力)给出预测。
* 选择最后一个词并计算它的 argmax。
* 将预测的词连接到解码器输入,然后传递给解码器。
* 在这种方法中,解码器根据它预测的之前的词预测下一个。
Note:这里使用的模型具有较小的能力以保持相对较快,因此预测可能不太正确。要复现论文中的结果,请使用全部数据集,并通过修改上述超参数来使用基础 transformer 模型或者 transformer XL。
```
def evaluate(inp_sentence):
"""Greedy-decode an English translation for one Portuguese sentence.

Returns (output_ids, attention_weights): the decoded token ids (the end
token, if produced, is not appended) and the decoder attention from the
last step.
"""
start_token = [tokenizer_pt.vocab_size]
end_token = [tokenizer_pt.vocab_size + 1]
# The input sentence is Portuguese; add the start and end tokens.
inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token
encoder_input = tf.expand_dims(inp_sentence, 0)
# The target is English, so the first token fed to the transformer
# is the English start token.
decoder_input = [tokenizer_en.vocab_size]
output = tf.expand_dims(decoder_input, 0)
for i in range(MAX_LENGTH):
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
encoder_input, output)
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = transformer(encoder_input,
output,
False,
enc_padding_mask,
combined_mask,
dec_padding_mask)
# Select the last token along the seq_len dimension.
predictions = predictions[: ,-1:, :] # (batch_size, 1, vocab_size)
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
# If predicted_id equals the end token, return the result.
if predicted_id == tokenizer_en.vocab_size+1:
return tf.squeeze(output, axis=0), attention_weights
# Append predicted_id to the output, which feeds the decoder next step.
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0), attention_weights
def plot_attention_weights(attention, sentence, result, layer):
"""Plot each attention head of `layer` as a heatmap: x-axis is the input
(Portuguese) tokens, y-axis is the decoded (English) tokens."""
fig = plt.figure(figsize=(16, 8))
sentence = tokenizer_pt.encode(sentence)
attention = tf.squeeze(attention[layer], axis=0)
for head in range(attention.shape[0]):
ax = fig.add_subplot(2, 4, head+1)
# Plot the attention weights.
ax.matshow(attention[head][:-1, :], cmap='viridis')
fontdict = {'fontsize': 10}
ax.set_xticks(range(len(sentence)+2))
ax.set_yticks(range(len(result)))
ax.set_ylim(len(result)-1.5, -0.5)
ax.set_xticklabels(
['<start>']+[tokenizer_pt.decode([i]) for i in sentence]+['<end>'],
fontdict=fontdict, rotation=90)
ax.set_yticklabels([tokenizer_en.decode([i]) for i in result
if i < tokenizer_en.vocab_size],
fontdict=fontdict)
ax.set_xlabel('Head {}'.format(head+1))
plt.tight_layout()
plt.show()
def translate(sentence, plot=''):
    """Translate a Portuguese sentence to English, printing the result.

    If `plot` names a decoder layer (e.g. 'decoder_layer4_block2'), its
    attention weights are plotted as well.
    """
    token_ids, attention_weights = evaluate(sentence)
    in_vocab = [i for i in token_ids if i < tokenizer_en.vocab_size]
    predicted_sentence = tokenizer_en.decode(in_vocab)
    print('Input: {}'.format(sentence))
    print('Predicted translation: {}'.format(predicted_sentence))
    if plot:
        plot_attention_weights(attention_weights, sentence, token_ids, plot)
translate("este é um problema que temos que resolver.")
print ("Real translation: this is a problem we have to solve .")
translate("os meus vizinhos ouviram sobre esta ideia.")
print ("Real translation: and my neighboring homes heard about this idea .")
translate("vou então muito rapidamente partilhar convosco algumas histórias de algumas coisas mágicas que aconteceram.")
print ("Real translation: so i 'll just share with you some stories very quickly of some magical things that have happened .")
```
您可以为 `plot` 参数传递不同的层和解码器的注意力模块。
```
translate("este é o primeiro livro que eu fiz.", plot='decoder_layer4_block2')
print ("Real translation: this is the first book i've ever done.")
```
## 总结
在本教程中,您已经学习了位置编码,多头注意力,遮挡的重要性以及如何创建一个 transformer。
尝试使用一个不同的数据集来训练 transformer。您可也可以通过修改上述的超参数来创建基础 transformer 或者 transformer XL。您也可以使用这里定义的层来创建 [BERT](https://arxiv.org/abs/1810.04805) 并训练最先进的模型。此外,您可以实现 beam search 得到更好的预测。
| github_jupyter |
##### 1

##### 2

##### 3

##### 4

##### 5

##### 6

##### 7

##### 8

##### 9

##### 10

##### 11

##### 12

##### 13

##### 14

##### 15

##### 16

##### 17

##### 18

##### 19

##### 20

##### 21

##### 22

##### 23

##### 24

##### 25

##### 26

##### 27

##### 28

##### 29

##### 30

##### 31

##### 32

##### 33

##### 34

##### 35

##### 36

##### 37

##### 38

##### 39

##### 40

##### 41

##### 42

| github_jupyter |
```
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
# Explicit column dtypes for the events CSV; event_date is parsed to
# datetime via parse_dates below.
dtypes = {'nt_fp': str, 'event_date': str, 'day_of_week': int, 'a_month': int, 'hour_of_day': int, 's_depcity': str, 's_adults': int,
's_children': int, 'ua_browser': str, 'ua_device_type': str, 'ua_platform': str, 'ip_lat': int, 'ip_long': int,
's_country': str}
parse_dates = ['event_date']
data_df = pd.read_csv('~/Desktop/events-by-week.csv', index_col=False, dtype=dtypes, parse_dates=parse_dates)
# NOTE(review): Series.dt.week is deprecated/removed in newer pandas;
# .dt.isocalendar().week is the replacement -- confirm the target version.
data_df['week_of_year'] = data_df['event_date'].dt.week
data_target = data_df['s_country']
# data_features = data_df.drop(columns=['s_country', 'event_date', 'nt_fp', 's_depcity',
# 'ua_device_type', 'ua_browser', 'day_of_week', 'ip_lat',
# 'ua_platform', 'hour_of_day', 'a_month'])
# Keep only the remaining numeric/categorical subset as features.
data_features = data_df.drop(columns=['s_country', 'event_date', 'nt_fp', 's_depcity',
'ua_device_type', 'ua_browser', 'day_of_week', 'week_of_year',
'ua_platform', 'hour_of_day', 'a_month'])
# One-hot encode any remaining categorical columns.
data_dummies = pd.get_dummies(data_features)
# data_dummies = data_features
# Positional 75/25 train/test split (no shuffling).
index_split = int (round(data_dummies.shape[0] * 0.75, 0))
train_X = data_dummies[:index_split][:]
test_X = data_dummies[index_split:][:]
train_y = data_target[:index_split][:]
test_y = data_target[index_split:][:]
X_train = train_X.values
X_test = test_X.values
y_train = train_y.values
y_test = test_y.values
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import preprocessing
```
### С нормализованными данными
```
# Row-wise L2 normalization; preprocessing.normalize returns a plain
# ndarray, so column names are lost from here on.
data_dummies = preprocessing.normalize(data_features)
data_dummies.size
# Split on the number of ROWS (shape[0]); the original used .size
# (rows * cols), which pushed the split index past the end of the data,
# leaving X_test empty and misaligning X with the row-indexed target.
index_split = int(round(data_dummies.shape[0] * 0.75, 0))
X_train = data_dummies[:index_split]
X_test = data_dummies[index_split:]
y_train = data_target[:index_split]
y_test = data_target[index_split:]
grb = GradientBoostingClassifier(n_estimators=50, random_state=17, learning_rate=0.01)
grb.fit(X_train, y_train)
# print("Правильность на обучающем наборе: {:.5f}".format(grb.score(X_train, y_train)))
# print('Правильность на тестовом наборе: {:.5f}'.format(grb.score(X_test, y_test)))
predictions = grb.predict(X_test)
# `data_samples` was never defined (NameError); the feature names live
# on data_features.
for name, score in zip(data_features.columns, grb.feature_importances_):
    print(name, score)
```
### Со стандартизированными данными
```
# Standardize each feature column (zero mean, unit variance); returns
# a plain ndarray, so column names are lost from here on.
data_dummies = preprocessing.scale(data_features)
# Split on the number of ROWS (shape[0]); the original used .size
# (rows * cols), which pushed the split index past the end of the data,
# leaving X_test empty and misaligning X with the row-indexed target.
index_split = int(round(data_dummies.shape[0] * 0.75, 0))
X_train = data_dummies[:index_split]
X_test = data_dummies[index_split:]
y_train = data_target[:index_split]
y_test = data_target[index_split:]
grb = GradientBoostingClassifier(n_estimators=50, random_state=17, learning_rate=0.01)
grb.fit(X_train, y_train)
print("Правильность на обучающем наборе: {:.5f}".format(grb.score(X_train, y_train)))
print('Правильность на тестовом наборе: {:.5f}'.format(grb.score(X_test, y_test)))
predictions = grb.predict(X_test)
# `data_samples` was never defined (NameError); the feature names live
# on data_features.
for name, score in zip(data_features.columns, grb.feature_importances_):
    print(name, score)
data_df.shape
data_features.head()
```
### Графики
```
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
sns.heatmap(data_dummies.corr())
plt.rcParams['figure.figsize'] = 12, 8
sns.violinplot(data_df['s_adults'], data_df['s_children'])
for i, col in enumerate(data_df.columns[:-1]):
plt.subplot(5,3,i+1)
plt.scatter(data_df[col], data_df['s_country'])
plt.title(col)
plt.scatter(data_df['hour_of_day'], data_df['ip_long'])
```
### Линейные модели
```
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
linreg = LinearRegression()
lasso = Lasso(random_state=17)
ridge = Ridge(random_state=17)
lasso_cv = LassoCV(random_state=17)
ridge_cv = RidgeCV()
scaler = StandardScaler()
# масштабируем данные
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
linreg.fit(X_train_scaled, y_train)
# среднеквадратичная ошибка между ответами на тестовой выборке и прогнозами метода наименьших квадратов
np.sqrt(mean_squared_error(y_test, linreg.predict(X_test_scaled)) )
pd.DataFrame(linreg.coef_, data_dummies.columns[:], columns=['coef']).sort_values(by='coef', ascending=False)
# Helper that fits a model and prints its error and coefficient table.
def report(model, X_train_scaled, y_train, X_test_scaled, y_test, feature_names):
    """Fit *model* and print its test RMSE plus its coefficients sorted
    in descending order."""
    model.fit(X_train_scaled, y_train)
    # Bug fix: the original scored the global `linreg` here instead of
    # the model under report, so every call printed the same RMSE.
    print('MSE = %f' % np.sqrt(mean_squared_error(y_test, model.predict(X_test_scaled))))
    print(pd.DataFrame(model.coef_, feature_names, columns=['coef']).sort_values(by='coef', ascending=False))
report(lasso, X_train_scaled, y_train, X_test_scaled, y_test, data_dummies.columns[:])
report(lasso_cv, X_train_scaled, y_train, X_test_scaled, y_test, data_dummies.columns[:])
report(ridge, X_train_scaled, y_train, X_test_scaled, y_test, data_dummies.columns[:])
report(ridge_cv, X_train_scaled, y_train, X_test_scaled, y_test, data_dummies.columns[:])
```
| github_jupyter |
# Azure Kubernetes Service (AKS) Deep MNIST
In this example we will deploy a tensorflow MNIST model in the Azure Kubernetes Service (AKS).
This tutorial will break down in the following sections:
1) Train a tensorflow model to predict mnist locally
2) Containerise the tensorflow model with our docker utility
3) Send some data to the docker model to test it
4) Install and configure Azure tools to interact with your cluster
5) Use the Azure tools to create and setup AKS cluster with Seldon
6) Push and run docker image through the Azure Container Registry
7) Test our Azure Kubernetes deployment by sending some data
#### Let's get started! 🚀🔥
## Dependencies:
* Helm v2.13.1+
* A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)
* kubectl v1.14+
* az CLI v2.0.66+
* Python 3.6+
* Python DEV requirements
## 1) Train a tensorflow model to predict mnist locally
We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels
```
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
import tensorflow as tf
if __name__ == '__main__':
# NOTE(review): despite the "deep MNIST" name, this is a single-layer
# softmax regression (y = softmax(xW + b)), not a deep network.
x = tf.placeholder(tf.float32, [None,784], name="x")
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b, name="y")
y_ = tf.placeholder(tf.float32, [None, 10])
# Cross-entropy loss; y is already a softmax, so tf.log(y) can underflow
# for confident predictions -- TF1's softmax_cross_entropy_with_logits
# is the numerically stable alternative.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# 1000 steps of minibatch SGD, batch size 100.
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels}))
# Persist the trained graph + weights for the serving container build.
saver = tf.train.Saver()
saver.save(sess, "model/deep_mnist_model")
```
## 2) Containerise the tensorflow model with our docker utility
First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content:
```
!cat .s2i/environment
```
Now we can build a docker image named "deep-mnist" with the tag 0.1
```
!s2i build . seldonio/seldon-core-s2i-python36:0.10 deep-mnist:0.1
```
## 3) Send some data to the docker model to test it
We first run the docker image we just created as a container called "mnist_predictor"
```
!docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1
```
Send some random features that conform to the contract
```
import matplotlib.pyplot as plt
import numpy as np
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# We now test the REST endpoint expecting the same result
endpoint = "0.0.0.0:5000"
batch = x
payload_type = "ndarray"
sc = SeldonClient(microservice_endpoint=endpoint)
# We use the microservice, instead of the "predict" function
client_prediction = sc.microservice(
data=batch,
method="predict",
payload_type=payload_type,
names=["tfidf"])
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
!docker rm mnist_predictor --force
```
## 4) Install and configure Azure tools
First we install the azure cli - follow specific instructions at https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest
```
!curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
```
#### Configure the azure CLI so it can talk to your server
(if you are getting issues, make sure you have the permmissions to create clusters)
You must run this through a terminal and follow the instructions:
```
az login
```
Once you are logged in, we can create our cluster. Run the following command, it may take a while so feel free to get a ☕.
```
%%bash
# We'll create a resource group
az group create --name SeldonResourceGroup --location westus
# Now we create the cluster. Note: the original was missing the trailing
# backslash after --generate-ssh-keys, so --kubernetes-version ran as a
# separate (failing) command and the version flag was silently ignored.
az aks create \
    --resource-group SeldonResourceGroup \
    --name SeldonCluster \
    --node-count 1 \
    --enable-addons monitoring \
    --generate-ssh-keys \
    --kubernetes-version 1.13.5
```
Once it's created we can authenticate our local `kubectl` to make sure we can talk to the azure cluster:
```
!az aks get-credentials --resource-group SeldonResourceGroup --name SeldonCluster
```
And now we can check that this has been successful by making sure that our `kubectl` context is pointing to the cluster:
```
!kubectl config get-contexts
```
## Install Seldon Core
### Before we install seldon core, we need to install HELM
For that, we need to create a ClusterRoleBinding for us, a ServiceAccount, and then a RoleBinding
```
!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
!kubectl create serviceaccount tiller --namespace kube-system
!kubectl apply -f tiller-role-binding.yaml
```
### Once that is set-up we can install Tiller
```
!helm repo update
!helm init --service-account tiller
# Wait until Tiller finishes
!kubectl rollout status deploy/tiller-deploy -n kube-system
```
### Now we can install SELDON.
We first start with the custom resource definitions (CRDs)
```
!helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts
```
And confirm they are running by getting the pods:
```
!kubectl rollout status statefulset.apps/seldon-operator-controller-manager -n seldon-system
```
### Now we set-up the ingress
This will allow you to reach the Seldon models from outside the kubernetes cluster.
In AKS it automatically creates an Azure Load Balancer, which you can configure from the Azure Portal
```
!helm install stable/ambassador --name ambassador --set crds.keep=false
```
And let's wait until it's fully deployed
```
!kubectl rollout status deployment.apps/ambassador
```
## Push docker image
In order for the AKS seldon deployment to access the image we just built, we need to push it to the Azure Container Registry (ACR) - you can check if it's been successfully created in the dashboard https://portal.azure.com/#blade/HubsExtension/BrowseResourceBlade/resourceType/Microsoft.ContainerRegistry%2Fregistries
If you have any issues please follow the official Azure documentation: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli
### First we create a registry
Make sure you keep the `loginServer` value in the output dictionary as we'll use it below.
```
!az acr create --resource-group SeldonResourceGroup --name SeldonContainerRegistry --sku Basic
```
### Make sure your local docker instance has access to the registry
```
!az acr login --name SeldonContainerRegistry
```
### Now prepare docker image
We need to first tag the docker image before we can push it.
NOTE: if you named your registry different make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker tag deep-mnist:0.1 seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
### And push the image
NOTE: if you named your registry different make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker push seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
## Running the Model
We will now run the model. As you can see we have a placeholder `"REPLACE_FOR_IMAGE_AND_TAG"`, which we'll replace to point to our registry.
Let's first have a look at the file we'll be using to trigger the model:
```
!cat deep_mnist.json
```
Now let's trigger seldon to run the model.
### Run the deployment in your cluster
NOTE: In order for this to work you need to make sure that your cluster has the permissions to pull the images. You can do this by:
1) Go into the Azure Container Registry
2) Select the SeldonContainerRegistry you created
3) Click on "Add a role assignment"
4) Select the AcrPull role
5) Select service principle
6) Find the SeldonCluster
7) Wait until the role has been added
We basically have a yaml file, where we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" for the image you pushed
```
%%bash
# Change accordingly if your registry is called differently
sed 's|REPLACE_FOR_IMAGE_AND_TAG|seldoncontainerregistry.azurecr.io/deep-mnist:0.1|g' deep_mnist.json | kubectl apply -f -
```
And let's check that it's been created.
You should see an image called "deep-mnist-single-model...".
We'll wait until STATUS changes from "ContainerCreating" to "Running"
```
!kubectl get pods
```
## Test the model
Now we can test the model, let's first find out what is the URL that we'll have to use:
```
!kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```
We'll use a random example from our dataset
```
import matplotlib.pyplot as plt
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
```
We can now add the URL above to send our request:
```
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
host = "52.160.64.65"
port = "80" # Make sure you use the port above
batch = x
payload_type = "ndarray"
sc = SeldonClient(
gateway="ambassador",
ambassador_endpoint=host + ":" + port,
namespace="default",
oauth_key="oauth-key",
oauth_secret="oauth-secret")
client_prediction = sc.predict(
data=batch,
deployment_name="deep-mnist",
names=["text"],
payload_type=payload_type)
print(client_prediction)
```
### Let's visualise the probability for each label
It seems that it correctly predicted the number 7
```
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
```
| github_jupyter |
# Lab 1: Markov Decision Processes - Problem 1
## Lab Instructions
All your answers should be written in this notebook. You shouldn't need to write or modify any other files.
**You should execute every block of code to not miss any dependency.**
*This project was developed by Peter Chen, Rocky Duan, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/. It is adapted from Berkeley Deep RL Class [HW2](https://github.com/berkeleydeeprlcourse/homework/blob/c1027d83cd542e67ebed982d44666e0d22a00141/hw2/HW2.ipynb) [(license)](https://github.com/berkeleydeeprlcourse/homework/blob/master/LICENSE)*
--------------------------
## Introduction
This assignment will review the two classic methods for solving Markov Decision Processes (MDPs) with finite state and action spaces.
We will implement value iteration (VI) and policy iteration (PI) for a finite MDP, both of which find the optimal policy in a finite number of iterations.
The experiments here will use the Frozen Lake environment, a simple gridworld MDP that is taken from `gym` and slightly modified for this assignment. In this MDP, the agent must navigate from the start state to the goal state on a 4x4 grid, with stochastic transitions.
```
from misc import FrozenLakeEnv, make_grader
env = FrozenLakeEnv()
print(env.__doc__)
```
Let's look at what a random episode looks like.
```
# Some basic imports and setup
import numpy as np, numpy.random as nr, gym
import matplotlib.pyplot as plt
%matplotlib inline
np.set_printoptions(precision=3)
# Seed RNGs so you get the same printouts as me
env.seed(0); from gym.spaces import prng; prng.seed(10)
# Generate the episode
env.reset()
for t in range(100):
env.render()
a = env.action_space.sample()
ob, rew, done, _ = env.step(a)
if done:
break
assert done
env.render();
```
In the episode above, the agent falls into a hole after two timesteps. Also note the stochasticity--on the first step, the DOWN action is selected, but the agent moves to the right.
We extract the relevant information from the gym Env into the MDP class below.
The `env` object won't be used any further, we'll just use the `mdp` object.
```
class MDP(object):
"""Minimal container for a finite MDP extracted from a gym Env."""
def __init__(self, P, nS, nA, desc=None):
self.P = P # state transition and reward probabilities, explained below
self.nS = nS # number of states
self.nA = nA # number of actions
self.desc = desc # 2D array specifying what each grid cell means (used for plotting)
mdp = MDP( {s : {a : [tup[:3] for tup in tups] for (a, tups) in a2d.items()} for (s, a2d) in env.P.items()}, env.nS, env.nA, env.desc)
print("mdp.P is a two-level dict where the first key is the state and the second key is the action.")
print("The 2D grid cells are associated with indices [0, 1, 2, ..., 15] from left to right and top to down, as in")
print(np.arange(16).reshape(4,4))
print("Action indices [0, 1, 2, 3] correspond to West, South, East and North.")
print("mdp.P[state][action] is a list of tuples (probability, nextstate, reward).\n")
print("For example, state 0 is the initial state, and the transition information for s=0, a=0 is \nP[0][0] =", mdp.P[0][0], "\n")
print("As another example, state 5 corresponds to a hole in the ice, in which all actions lead to the same state with probability 1 and reward 0.")
for i in range(4):
print("P[5][%i] =" % i, mdp.P[5][i])
```
## Part 1: Value Iteration
### Problem 1: implement value iteration
In this problem, you'll implement value iteration, which has the following pseudocode:
---
Initialize $V^{(0)}(s)=0$, for all $s$
For $i=0, 1, 2, \dots$
- $V^{(i+1)}(s) = \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$, for all $s$
---
We additionally define the sequence of greedy policies $\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}$, where
$$\pi^{(i)}(s) = \arg \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$$
Your code will return two lists: $[V^{(0)}, V^{(1)}, \dots, V^{(n)}]$ and $[\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}]$
To ensure that you get the same policies as the reference solution, choose the lower-index action to break ties in $\arg \max_a$. This is done automatically by np.argmax. This will only affect the "# chg actions" printout below--it won't affect the values computed.
<div class="alert alert-warning">
Warning: make a copy of your value function each iteration and use that copy for the update--don't update your value function in place.
Updating in-place is also a valid algorithm, sometimes called Gauss-Seidel value iteration or asynchronous value iteration, but it will cause you to get different results than our reference solution (which in turn will mean that our testing code won’t be able to help in verifying your code).
</div>
```
def value_iteration(mdp, gamma, nIt, grade_print=print):
    """
    Synchronous value iteration on a finite MDP.

    Inputs:
        mdp: MDP with fields P (transition dict), nS, nA
        gamma: discount factor
        nIt: number of Bellman backups to perform, corresponding to n above
    Outputs:
        (value_functions, policies)
        len(value_functions) == nIt+1 and len(policies) == nIt
    """
    grade_print("Iteration | max|V-Vprev| | # chg actions | V[0]")
    grade_print("----------+--------------+---------------+---------")
    value_history = [np.zeros(mdp.nS)]  # V^{(0)} is identically zero
    policy_history = []

    def bellman_backup(prev_values):
        # One synchronous backup: compute Q(s, a) for every action under
        # prev_values, then act greedily in each state.
        new_values = np.empty(mdp.nS)
        greedy_policy = np.empty(mdp.nS, dtype=int)
        for state in range(mdp.nS):
            q_row = np.array([
                sum(prob * (reward + gamma * prev_values[nxt])
                    for prob, nxt, reward in outcomes)
                for outcomes in mdp.P[state].values()
            ])
            best_action = int(np.argmax(q_row))  # argmax breaks ties low
            greedy_policy[state] = best_action
            new_values[state] = q_row[best_action]
        return new_values, greedy_policy

    for it in range(nIt):
        oldpi = policy_history[-1] if policy_history else None
        Vprev = value_history[-1]
        V, pi = bellman_backup(Vprev)
        max_diff = np.abs(V - Vprev).max()
        nChgActions = "N/A" if oldpi is None else (pi != oldpi).sum()
        grade_print("%4i | %6.5f | %4s | %5.3f" % (it, max_diff, nChgActions, V[0]))
        value_history.append(V)
        policy_history.append(pi)
    return value_history, policy_history
GAMMA = 0.95 # we'll be using this same value in subsequent problems
# The following is the output of a correct implementation; when
# this code block is run, your implementation's print output will be
# compared with expected output.
# (incorrect line in red background with correct line printed side by side to help you debug)
expected_output = """Iteration | max|V-Vprev| | # chg actions | V[0]
----------+--------------+---------------+---------
0 | 0.80000 | N/A | 0.000
1 | 0.60800 | 2 | 0.000
2 | 0.51984 | 2 | 0.000
3 | 0.39508 | 2 | 0.000
4 | 0.30026 | 2 | 0.000
5 | 0.25355 | 1 | 0.254
6 | 0.10478 | 0 | 0.345
7 | 0.09657 | 0 | 0.442
8 | 0.03656 | 0 | 0.478
9 | 0.02772 | 0 | 0.506
10 | 0.01111 | 0 | 0.517
11 | 0.00735 | 0 | 0.524
12 | 0.00310 | 0 | 0.527
13 | 0.00190 | 0 | 0.529
14 | 0.00083 | 0 | 0.530
15 | 0.00049 | 0 | 0.531
16 | 0.00022 | 0 | 0.531
17 | 0.00013 | 0 | 0.531
18 | 0.00006 | 0 | 0.531
19 | 0.00003 | 0 | 0.531"""
Vs_VI, pis_VI = value_iteration(mdp, gamma=GAMMA, nIt=20, grade_print=make_grader(expected_output))
```
Below, we've illustrated the progress of value iteration. Your optimal actions are shown by arrows.
At the bottom, the value of the different states are plotted.
```
for (V, pi) in zip(Vs_VI[:10], pis_VI[:10]):
plt.figure(figsize=(3,3))
plt.imshow(V.reshape(4,4), cmap='gray', interpolation='none', clim=(0,1))
ax = plt.gca()
ax.set_xticks(np.arange(4)-.5)
ax.set_yticks(np.arange(4)-.5)
ax.set_xticklabels([])
ax.set_yticklabels([])
Y, X = np.mgrid[0:4, 0:4]
a2uv = {0: (-1, 0), 1:(0, -1), 2:(1,0), 3:(-1, 0)}
Pi = pi.reshape(4,4)
for y in range(4):
for x in range(4):
a = Pi[y, x]
u, v = a2uv[a]
plt.arrow(x, y,u*.3, -v*.3, color='m', head_width=0.1, head_length=0.1)
plt.text(x, y, str(env.desc[y,x].item().decode()),
color='g', size=12, verticalalignment='center',
horizontalalignment='center', fontweight='bold')
plt.grid(color='b', lw=2, ls='-')
plt.figure()
plt.plot(Vs_VI)
plt.title("Values of different states");
```
| github_jupyter |
# Image Classification with Logistic Regression from Scratch with NumPy
Welcome to another jupyter notebook of implementing machine learning algorithms from scratch using only NumPy. This time we will be implementing a different version of logistic regression for a simple image classification task. I've already done a basic version of logistic regression before [here](https://github.com/leventbass/logistic_regression). This time, we will use logistic regression to classify images. I will show all necessary mathematical equations of logistic regression and how to vectorize the summations in the equations. We will be working with a subset of the famous handwritten digit dataset called MNIST. In the subset, there will only be images of digit 1 and 5. Therefore, we will be solving a binary classification problem.
This notebook includes feature extraction, model training, and evaluation steps. Let's see what we will achieve in this post in steps:
* First, we wil load and visualize the dataset and extract two different set of features to build a classifier on.
* We will run our logistic regression algorithm with gradient descent the representations to classify digits into 1 and 5.
* We will experiment with different learning rates to find the best one.
* Finally, we will evaluate the implemented models, decide which is the best performing one and visualize a decision boundary.
* Once again, let's remind ourselves that we won't be using any function or library that accomplishes the task itself. For instance, we won't use scikit-learn to implement cross validation, we will use numpy for that and for all of the other tasks.
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
## Feature Extraction
Let's load the training/test data and labels as numpy arrays. All data that is used is provided in the repository in data folder. Train and test data are 1561x256 and 424x256 dimensional matrices, respectively. Each row in the aforementioned matrices corresponds to an image of a digit. The 256 pixels correspond to a 16x16 image. Label 1 is assigned to digit 1 and label -1 is assigned to digit 5.
```
# Flattened digit images: each row is a 16x16 image stored as 256 pixel values.
# Train is 1561x256 and test is 424x256 (sizes stated in the text above).
train_x = np.load('data/train_data.npy')
train_y = np.load('data/train_labels.npy')  # +1 for digit 1, -1 for digit 5
test_x = np.load('data/test_data.npy')
test_y = np.load('data/test_labels.npy')
```
Now, let's display two of the digit images, one for digit 1 and one for digit 5. We will use `imshow` function of `matplotlib` library with a suitable colormap. We will first need to reshape 256 pixels to a 16x16 matrix.
```
# Reshape the flat 256-pixel rows back to 16x16 grids so imshow can render them.
digit_1 = train_x[0].reshape((16,16))   # first sample: a digit 1
digit_5 = train_x[-1].reshape((16,16))  # last sample: a digit 5
plt.subplot(121, title='Digit 1')
plt.imshow(digit_1, cmap='gray');
plt.subplot(122, title='Digit 5')
plt.imshow(digit_5, cmap='gray');
```
**Implementing Representation 1:**
Now, we will extract the **symmetry** and **average intensity** features to use in the model. To compute the intensity features, we compute the average pixel value of the image, and for the symmetry feature, we compute the negative of the norm of the difference between the image and its y-axis symmetrical. We will extract these two features for each image in the training and test sets. As a result, we should obtain a training data matrix of size 1561x2 and test data matrix of size 424x2.
Throughout the notebook, we will refer the representation with these two features as **Representation 1**
```
# Feature 1 (average intensity): mean pixel value of each image.
train_feature_1 = np.mean(train_x, axis=1)
test_feature_1 = np.mean(test_x, axis=1)
# Mirror each image about the y-axis by flipping the columns of its 16x16 grid.
mirrored_image_train = np.flip(train_x.reshape((train_x.shape[0],16,16)), axis=2)
mirrored_image_test = np.flip(test_x.reshape((test_x.shape[0],16,16)), axis=2)
plt.subplot(121, title='Image')
plt.imshow(train_x[-1].reshape((16,16)), cmap='gray');
plt.subplot(122, title='Mirrored Image')
plt.imshow(mirrored_image_train[-1], cmap='gray');
# Feature 2 (symmetry): negative L2 norm of (image - mirror). A perfectly
# symmetric image scores 0; asymmetric images score more negative.
train_diff = train_x - mirrored_image_train.reshape((mirrored_image_train.shape[0],256))
test_diff = test_x - mirrored_image_test.reshape((mirrored_image_test.shape[0],256))
norm_train_diff = np.linalg.norm(train_diff, axis=1)
norm_test_diff = np.linalg.norm(test_diff, axis=1)
train_feature_2 = -(norm_train_diff)
test_feature_2 = -(norm_test_diff)
# Stack the two features column-wise into Nx2 matrices (Representation 1).
train_X_1 = np.concatenate((train_feature_1[:,np.newaxis], train_feature_2[:,np.newaxis]), axis=1)
test_X_1 = np.concatenate((test_feature_1[:,np.newaxis], test_feature_2[:,np.newaxis]), axis=1)
```
Now, let's provide two scatter plots, one for training and one for test data. The plots will contain the average intensity values in the x-axis and symmetry values in the y-axis. We will denote the data points of label 1 with blue marker shaped <font color='blue'>o</font> and the data points of label -1 with a red marker shaped <font color='red'>x</font>.
```
# Scatter of Representation 1: blue 'o' = label +1 (digit 1), red 'x' = label -1 (digit 5).
plt.figure(figsize=(6,6))
plt.scatter(train_X_1[(train_y==1),0], train_X_1[(train_y==1),1], marker='o', color='blue', s=16)
plt.scatter(train_X_1[(train_y==-1),0], train_X_1[(train_y==-1),1], marker='x', color='red', s=16)
plt.title('Class Distribution of Training Data for Representation 1')
plt.xlabel('Average Intensity')
plt.ylabel('Symmetry')
# Same plot for the held-out test set.
plt.figure(figsize=(6,6))
plt.scatter(test_X_1[(test_y==1),0], test_X_1[(test_y==1),1], marker='o', color='blue', s=16)
plt.scatter(test_X_1[(test_y==-1),0], test_X_1[(test_y==-1),1], marker='x', color='red', s=16)
plt.title('Class Distribution of Test Data for Representation 1')
plt.xlabel('Average Intensity')
plt.ylabel('Symmetry');
```
**Implementing Representation 2:** We will come up with an alternative feature extraction approach and we will refer this representation as **Representation 2**.
```
# Feature 1 for representation 2: the number of pixels that belong to the digit
# (value > -1; -1 is the blank surrounding area), normalised to a fraction of
# the image. BUG FIX: the original divided by the number of IMAGES
# (shape[0] — 1561 for train but 424 for test), which put the train and test
# features on different scales; divide by the pixels per image instead.
train_rep2_fet1 = np.array([(i>-1).sum() for i in train_x])/(train_x.shape[1])
test_rep2_fet1 = np.array([(i>-1).sum() for i in test_x])/(test_x.shape[1])
train_rep2_fet2 = np.std(train_x, axis=1) # feature 2 for representation 2: pixel standard deviation
test_rep2_fet2 = np.std(test_x, axis=1)
# Stack the two features column-wise into Nx2 matrices (Representation 2).
train_X_2 = np.concatenate((train_rep2_fet1[:,np.newaxis], train_rep2_fet2[:,np.newaxis]), axis=1)
test_X_2 = np.concatenate((test_rep2_fet1[:,np.newaxis], test_rep2_fet2[:,np.newaxis]), axis=1)
```
To create the first feature of representation 2, we count the pixels whose value is higher than -1, since pixel values of -1 represent the area surrounding the digit rather than the digit itself. This count is clearly distinctive between images of 5 and images of 1, because a drawn 5 evidently takes up more space than a drawn 1.
To add another feature to representation 2, let's calculate the standard deviation of the images. An image of the number 5 will obviously have a higher standard deviation than an image of the number 1, because its pixels are more dispersed throughout the area, while the pixel values of a 1 are more confined and closer to each other. Hence, the standard deviation of the pixel values is a differentiating factor for our images.
```
# Scatter of Representation 2: blue 'o' = label +1 (digit 1), red 'x' = label -1 (digit 5).
plt.figure(figsize=(9,5))
plt.scatter(train_X_2[(train_y==1),0], train_X_2[(train_y==1),1], marker='o', color='blue', s=16)
plt.scatter(train_X_2[(train_y==-1),0], train_X_2[(train_y==-1),1], marker='x', color='red', s=16)
plt.title('Class Distribution of Training Data for Representation 2')
# BUG FIX: the training plot was labelled with Representation 1's axis names
# ('Average Intensity'/'Symmetry'); use the Representation 2 feature names,
# matching the test plot below.
plt.xlabel('Length of non-white Pixels')
plt.ylabel('Standard Deviation')
plt.figure(figsize=(9,5))
plt.scatter(test_X_2[(test_y==1),0], test_X_2[(test_y==1),1], marker='o', color='blue', s=16)
plt.scatter(test_X_2[(test_y==-1),0], test_X_2[(test_y==-1),1], marker='x', color='red', s=16)
plt.title('Class Distribution of Test Data for Representation 2')
plt.xlabel('Length of non-white Pixels')
plt.ylabel('Standard Deviation');
```
## Logistic Regression
Let's implement the logistic regression classifier from scratch with gradient descent and train it using Representation 1 and Representation 2 as inputs. We will concatenate 1 to our features for the intercept term, such that one data point will look like for 2-D features [1,$x_1$,$x_2$], and the model vector will be [$w_0, w_1, w_2$], where $w_0$ is the intercept parameter.
```
def data_init(X, y):
    """Prepare data for training: reshape y into a column vector, prepend a
    bias column of ones to X, and create an all-zero parameter vector.

    Returns (X_with_bias, y_column, initial_params).
    """
    y_col = y[:, np.newaxis]
    n_samples = y_col.shape[0]
    X_aug = np.hstack((np.ones((n_samples, 1)), X))
    n_params = X_aug.shape[1]
    initial_params = np.zeros((n_params, 1))
    return (X_aug, y_col, initial_params)
```
To implement the gradient of the logistic loss with respect to $w$, first let's derive its expression:
Total cost is:
$E(w) = \frac{1}{N} \sum_{n=1}^{N} \ln \left(1 + \exp \left(-y^{\left(n\right)} w^T x^{\left(n\right)}\right)\right)$
Cost for one sample is:
$E \left(w^{\left(1\right)} \right) = \ln \left(1 + \exp \left(-y^{\left(1\right)} w^T x^{\left(1\right)} \right) \right)$
where;
$y = \begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_N \end{bmatrix}_{N\times 1}$
$x = \begin{bmatrix} 1 & {x_1}^{\left(1\right)} & {x_2}^{\left(1\right)} \\
1 & {x_1}^{\left(2\right)} & {x_2}^{\left(2\right)} \\
\vdots & \vdots & \vdots \\
1 & {x_1}^{\left(N\right)} & {x_2}^{\left(N\right)}\end{bmatrix}_{N\times 3}$
$w = \begin{bmatrix}w_0 \\ w_1 \\ w_2 \end{bmatrix}_{3\times 1}$
Let $z = -y^{\left(1\right)} w^T x^{\left(1\right)}$:
$\begin{aligned}
\frac{\partial E}{\partial w_0} &= \frac{\partial \ln(1 + \exp(z))}{\partial w_0} \\
&=\frac{\exp(z) \frac{\partial z}{\partial w_0}}{1 + \exp(z)}
\quad \left( \theta(z) = \frac{\exp(z)}{1 + \exp(z)} \right)\\
&= \theta(z) \frac{\partial z}{\partial w_0} \\
&= \theta\left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right)
\frac{\partial \left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right)}{\partial w_0} \\
&= \theta\left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right)
\frac{\partial \left(-y^{\left(1\right)} \left(w_0 + w_1 {x_1}^{\left(1\right)} + w_2 {x_2}^{\left(1\right)}\right)\right)}{\partial w_0}\\
\frac{\partial E}{\partial w_0} &= \theta\left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right) \left( -y^{\left(1\right)} \right) \\
\frac{\partial E}{\partial w_1} &= \theta\left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right) \left( -y^{\left(1\right)} {x_1}^{\left(1\right)} \right)\\
\frac{\partial E}{\partial w_2} &= \theta\left(-y^{\left(1\right)} w^T x^{\left(1\right)}\right) \left( -y^{\left(1\right)} {x_2}^{\left(1\right)} \right)\\
\end{aligned}$
$\begin{aligned}
\nabla E (w) &= \frac{1}{N} \sum_{n=1}^{N} -\theta \left(-y^{\left(n\right)} w^T x^{\left(n\right)}\right) y^{\left(n\right)} x^{\left(n\right)}\\
&= \frac{1}{N} {\left( - \textbf{y} \circ \textbf{x} \right)}^T \cdot \theta \left( -\textbf{y} \circ \textbf{x w} \right)
\end{aligned}$
To prove that our implementation is converging, we will keep the loss values at each gradient descent iteration in a numpy array. To decide when to terminate the gradient descent iterations, we will check the absolute difference between the current loss value and the loss value of the previous step. If the difference is less than a small number, such as $10^{-5}$, we will exit the loop.
```
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
def gradient_descent(X, y, params, learning_rate):
    """Run gradient descent on the logistic loss until the cost change between
    consecutive iterations falls below 1e-5.

    Returns (cost_history_array, final_params); `params` is not mutated.
    """
    n_samples = len(y)
    history = []
    prev_cost = None
    while True:
        # Gradient of the mean logistic loss for labels in {-1, +1}.
        grad = (-y * X).T @ sigmoid(-y * (X @ params))
        params = params - (learning_rate / n_samples) * grad
        cost = compute_cost(X, y, params)
        history.append(cost)
        # Stop once the loss has effectively plateaued.
        if prev_cost is not None and abs(cost - prev_cost) < 10 ** -5:
            break
        prev_cost = cost
    return (np.array(history), params)
def compute_cost(X, y, theta):
    """Mean logistic loss (1/N) * sum(log(1 + exp(-y * X theta))) for labels
    in {-1, +1}."""
    margins = -y * (X @ theta)
    per_sample = np.log(1 + np.exp(margins))
    return per_sample.sum() / len(y)
```
After the training is finalized, we will plot the loss values with respect to iteration count. Obviously, we should observe a decreasing loss as the number of iterations increases. Also, we will experiment with 5 different learning rates between 0 and 1, and plot the convergence curves for each learning rate in the same plot to observe the effect of the learning rate (step size) on the convergence.
```
# Build the bias-augmented design matrix, column-vector labels and all-zero
# initial parameters once; gradient_descent does not mutate `params`, so the
# same zero vector is a fresh starting point for every learning rate.
(X, y, params) = data_init(train_X_1, train_y)
lr_list = [0.001, 0.003, 0.01, 0.03, 0.1]
c_list = ['red', 'green', 'yellow', 'blue','black']
plt.figure()
# One convergence curve per learning rate, drawn in the matching colour.
# NOTE(review): there is no legend, so curves are identified only by colour order.
for lr, color in zip(lr_list, c_list):
    (cost_history, params_optimal) = gradient_descent(X, y, params, lr)
    plt.plot(range(len(cost_history)),cost_history, c=color);
plt.title("Convergence Graph of Cost Function")
plt.xlabel("Number of Iterations")
plt.ylabel("Cost")
plt.show()
```
## Evaluation
Now, let's train the logistic regression classifier on Representation 1 and 2 with the best learning rate we have used so far. We will report the training and test classification accuracy as:
\begin{align*}
\frac{\text{number of correctly classified samples}}{\text{total number of samples}}\times 100
\end{align*}
```
def predict(X, params):
    """Predict labels in {-1, +1}: +1 when the predicted probability exceeds
    0.5, otherwise -1."""
    prob = sigmoid(X @ params)
    return np.where(prob > 0.5, 1, -1)
def get_accuracy(y_pred, y):
    """Percentage of predictions in y_pred that match the labels in y."""
    matches = np.count_nonzero(y_pred == y)
    return float(matches) / float(len(y)) * 100
def evaluate(train_X, train_y, test_X, test_y, learning_rate, lambda_param):
    """Train logistic regression on (train_X, train_y) and print/return the
    training and test accuracies.

    Args:
        train_X, train_y: training features and labels in {-1, +1}.
        test_X, test_y: test features and labels.
        learning_rate: gradient-descent step size.
        lambda_param: accepted for interface compatibility but unused — no
            regularisation is implemented.

    Returns:
        (train_score, test_score) accuracy percentages.
    """
    (X, y, params) = data_init(train_X, train_y)
    (_, params_optimal_1) = gradient_descent(X, y, params, learning_rate)
    # Add the bias column to the test features; the training matrix already
    # got its bias column inside data_init. (The original routed this through
    # a misleadingly named 'X_normalized' alias that did no normalisation.)
    X_test = np.hstack((np.ones((test_X.shape[0], 1)), test_X))
    y_pred_train = predict(X, params_optimal_1)
    train_score = get_accuracy(y_pred_train, y)
    print('Training Score:', train_score)
    y_pred_test = predict(X_test, params_optimal_1)
    test_score = get_accuracy(y_pred_test, test_y[:, np.newaxis])
    print('Test Score:', test_score)
    # Return the scores so callers can use them programmatically; previously
    # they were only printed.
    return train_score, test_score
# Train/evaluate on both representations with learning rate 0.1. The last
# argument is the regularisation parameter expected by evaluate(); it is
# never used inside the function.
print('Evaluation results for Representation 1:')
print('-'*50)
evaluate(train_X_1, train_y, test_X_1, test_y, 0.1, 0.0003)
print('\nEvaluation results for Representation 2:')
print('-'*50)
evaluate(train_X_2, train_y, test_X_2, test_y, 0.1, 0.0001)
```
Last but not least, we will visualize the decision boundary (the line that is given by $\mathbf{w}^{T}x=0$) obtained from the logistic regression classifier learned. For this purpose, we will only use Representation 1. Below, two scatter plots can be seen for training and test data points with the decision boundary shown on each of the plots.
```
(X, y, params) = data_init(train_X_1,train_y)
learning_rate = 0.1
(_, params_optimal_1) = gradient_descent(X, y, params, learning_rate)
# Decision boundary w0 + w1*x1 + w2*x2 = 0, rewritten as x2 = intercept + slope*x1.
slope = -(params_optimal_1[1] / params_optimal_1[2])
intercept = -(params_optimal_1[0] / params_optimal_1[2])
titles = ['Training Data with Decision Boundary', 'Test Data with Decision Boundary']
# NOTE(review): the first tuple reuses `y` (the column-vector labels produced
# by data_init above) while the second uses the raw test_y; the loop also
# rebinds X and y, shadowing the variables from data_init.
for X, y, title in [(train_X_1, y, titles[0]), (test_X_1, test_y, titles[1])]:
    plt.figure(figsize=(7,7))
    plt.scatter(X[:,0],X[:,1],c=y.reshape(-1), s=14, cmap='bwr')
    ax = plt.gca()
    ax.autoscale(False)  # freeze axis limits so the boundary line doesn't rescale the view
    x_vals = np.array(ax.get_xlim())
    y_vals = intercept + (slope * x_vals)
    plt.title(title);
    plt.plot(x_vals, y_vals, c='k')
```
| github_jupyter |
## Script S5
Convert from the Jekyll title format into the origin URL.
**Target**: http://127.0.0.1:4000/2021/03/20/Fluid-Typography-with-CSS-Clamp()-is-My-New-Favorite-Thing-DEV-Community.html
**File Path**: 2021-03-20-Fluid-Typography-with-CSS-Clamp()-is-My-New-Favorite Thing---DEV-Community.md
**Title**: Fluid Typography with CSS-Clamp() is My New Favorite Thing
```
from selenium import webdriver
from bs4 import BeautifulSoup
from neo4j import GraphDatabase, basic_auth
from neo4j.exceptions import Neo4jError
import csv
import json
import time
import os
import re
FOLDER_PATH = '../web/_posts/'  # destination folder for generated Jekyll posts
test_url = 'https://dev.to/cruip/50-free-tools-and-resources-to-create-awesome-user-interfaces-1c1b'
DATA_FILE_PATH = 'dev-to-articles.csv'
# SECURITY NOTE(review): database credentials are hardcoded in the notebook;
# move them to environment variables before sharing.
DATABASE_USERNAME="neo4j"
DATABASE_PASSWORD="spade-discounts-switch"
DATABASE_URL="bolt://localhost:7687"
DATA_FILE_PATH = 'dev-to-articles.csv'  # NOTE(review): duplicate of the assignment above
driver = GraphDatabase.driver(DATABASE_URL, auth=basic_auth(DATABASE_USERNAME, str(DATABASE_PASSWORD)))
def cprint(content,module='DEBUG',*args):
    """Print `content` prefixed with a coloured [module] tag and suffixed with
    a local timestamp; any extra positional args are rendered in magenta
    between the content and the timestamp."""
    if args:
        print('\033[1;32;43m ['+module+'] \033[0m '+ content + '\033[1;35m' +str(args) +' \033[0m' + time.strftime(" |%Y-%m-%d %H:%M:%S|", time.localtime()) )
    else:
        print('\033[1;32;43m ['+module+'] \033[0m '+ content + time.strftime(" |%Y-%m-%d %H:%M:%S|", time.localtime()))
def remove_invalid_text(title):
    """Strip characters that are illegal in file names: / \\ : * ? " < > |"""
    forbidden = r"[\/\\\:\*\?\"\<\>\|]"
    return re.sub(forbidden, "", title)
def init_yaml_header(soup):
    '''
    Build the YAML front-matter lines for a converted post.

    Args: soup of source post (BeautifulSoup of a dev.to article page)
    Return: list of lines forming valid yaml for riinosite3
    '''
    title = soup.head.title.string
    date = soup.find("time", {"class":"date"})['datetime'][:10]
    # Author name is the last child of the byline link, with newlines stripped.
    author = soup.find("a",{"class":"flex items-center mr-4 mb-4 s:mb-0 fw-medium crayons-link"}).contents[-1].replace('\n','').strip()
    # NOTE(review): the missing commas after 'layout: post\n' and 'toc: false\n'
    # make Python concatenate the adjacent string literals; the written output
    # is unchanged, but the list has two fewer elements than it appears to.
    yaml = ['---\n',
            'layout: post\n'
            f'title: "{title}"\n',
            f'author: "{author}"\n',
            f'date: {date}\n',
            'toc: false\n'
            'tags:\n'
            ]
    # One YAML list entry per article tag; tag.text starts with '#', dropped here.
    for tag in soup.find_all("a",{"class":"crayons-tag"}):
        yaml.append(' - '+tag.text[1:]+'\n')
    yaml.append('---\n')
    return yaml
def generate_post_filename(published_at,title):
    '''
    Build a Jekyll post file name: YYYY-MM-DD-Title---DEV-Community.md

    Args:
        published_at: ISO timestamp string; only the date (first 10 chars) is used.
        title: article title; spaces become hyphens.
    Return: valid file name for riinosite3
    '''
    stem = published_at[:10]+'-'+title.replace(' ','-')+'---DEV-Community'
    stem = remove_invalid_text(stem)
    # Replace dots that appear inside the title (e.g. "Node.js") so they do not
    # look like extra extensions. BUG FIX: the original appended '.md' first
    # and then replaced every '.', turning the extension itself into '-md'.
    stem = stem.replace('.','-')
    return stem + '.md'
def save_markdown_file(soup,folder_path):
    '''
    Save a markdown file built from `soup` into the target folder path.

    Args:
        soup: BeautifulSoup of the source post
        folder_path: target folder; NOTE(review): ignored — the module-level
            FOLDER_PATH is used instead.
    Return: None
    '''
    # NOTE(review): generate_post_filename takes (published_at, title) but is
    # called here with a single soup argument, so this call raises TypeError
    # as written. Also: 'ssuccessfully' in the log message is a typo, and
    # file_w.close() is redundant inside the `with` block.
    with open(FOLDER_PATH+generate_post_filename(soup), mode='w',encoding="utf-8") as file_w:
        #write yaml
        file_w.writelines(init_yaml_header(soup))
        #write body
        for i in soup.find("div", {"id":"article-body"}).contents:
            file_w.write(str(i))
        cprint('Write file ssuccessfully ','FILE',FOLDER_PATH+generate_post_filename(soup))
        file_w.close()
def db_update_article_jekyll_path(session,id,value):
    '''
    Set the jekyll_path property on the Article node with the given id.

    Args:
        session: db session, driver.session()
        id: article id, in csv file.
        value: the jekyll_path value to store on the node
    '''
    def _cypher(tx, article_id, jekyll_path):
        # BUG FIX: the original never passed the parameters to tx.run, so
        # $id/$value were unbound, and it referenced an undefined name `key`
        # when scheduling the transaction (NameError).
        return list(tx.run(
            '''
            MATCH (n:Article { id: $id })
            SET n.jekyll_path = $value
            RETURN n
            ''',
            id=article_id, value=jekyll_path
        ))
    result = session.write_transaction(_cypher, id, value)
    return result
# Load previously scraped article metadata from the CSV into article_props,
# skipping exact-duplicate rows.
article_props = []
if os.path.exists(DATA_FILE_PATH):
    if not os.path.getsize(DATA_FILE_PATH):
        cprint(DATA_FILE_PATH +'is empty')
    else:
        with open(DATA_FILE_PATH, mode='r',encoding="utf-8") as data_file_r:
            csv_reader = csv.DictReader(data_file_r)
            line_count = 0
            props=set()  # NOTE(review): never used after this point
            for row in csv_reader:
                if line_count == 0:
                    cprint(f'Processing CSV header {", ".join(row)}','CSV')
                    line_count += 1
                article_prop = {
                    'id': row['id'],
                    'title': row['title'],
                    'url': row['url'],
                    'main_image_url': row['main_image_url'],
                    'reading_time': row['reading_time'],
                    'tag_names': row['tag_names'],
                    'published_at': row['published_at'],
                    'source_site':'dev.to',
                    'author_name':row['author_name'],
                    'count':row['public_reactions_count']
                }
                # NOTE(review): membership test on a list is O(n) per row;
                # fine for small files, slow for large ones.
                if article_prop in article_props:
                    continue
                else:
                    article_props.append(article_prop)
                line_count += 1
            cprint(f'File processed successfully with {line_count-1} ids.','CSV')
            data_file_r.close()  # redundant inside `with`
else:
    cprint(DATA_FILE_PATH +' does not exist')
# Smoke-check the filename helper on the second article; the bare string
# below is pasted notebook output kept as a harmless expression statement.
generate_post_filename(article_props[1]['published_at'],article_props[1]['title'])
FOLDER_PATH+generate_post_filename(article_props[1]['published_at'],article_props[1]['title'])
'2020-10-06-ReactJS-Roadmap-🗺-For-Developers-💻---DEV-Community.md'
```
| github_jupyter |
# Инициализация
```
#@markdown - **Mount GoogleDrive**
from google.colab import drive
drive.mount('GoogleDrive')
# #@markdown - **Unmount**
# !fusermount -u GoogleDrive
```
# Область кодов
```
#@title Curve fitting { display-mode: "both" }
# Curve fitting
# Fits the sample data with a single-hidden-layer neural network.
# The results can be compared with those of the Tikhonov regularisation method.
# coding: utf-8
# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (tf.placeholder, tf.Session), which is unavailable in TF 2.x
# without tf.compat.v1.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
#@markdown - **Parameter settings**
num_epoch = 200 #@param {type: "integer"}
# Prepare the sample data: column 0 is x, column 1 is y.
data = np.array([[-2.95507616, 10.94533252],
                 [-0.44226119, 2.96705822],
                 [-2.13294087, 6.57336839],
                 [1.84990823, 5.44244467],
                 [0.35139795, 2.83533936],
                 [-1.77443098, 5.6800407],
                 [-1.8657203, 6.34470814],
                 [1.61526823, 4.77833358],
                 [-2.38043687, 8.51887713],
                 [-1.40513866, 4.18262786]])
x = data[:, 0]
y = data[:, 1]
X = x.reshape(-1, 1)
Y = y.reshape(-1, 1)
# Prediction grid: denser than the original data
x_pre = np.linspace(x.min(), x.max(), 30, endpoint=True).reshape(-1, 1)
#@markdown - **Build the graph**
graph = tf.Graph()
with graph.as_default():
    with tf.name_scope('Input'):
        x = tf.placeholder(tf.float32, shape=[None, 1], name='x')
        y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
    with tf.name_scope('FC'):
        # Hidden layer: 1 -> 32 units with sigmoid activation.
        w_1 = tf.get_variable('w_fc1', shape=[1, 32], initializer=tf.initializers.truncated_normal(stddev=0.1))
        b_1 = tf.get_variable('b_fc1', initializer=tf.constant(0.1, shape=[32]))
        layer_1 = tf.nn.sigmoid(tf.matmul(x, w_1) + b_1)
    with tf.name_scope('Output'):
        # Linear output layer: 32 -> 1.
        w_2 = tf.get_variable('w_fc2', shape=[32, 1], initializer=tf.initializers.truncated_normal(stddev=0.1))
        b_2 = tf.get_variable('b_fc2', initializer=tf.constant(0.1, shape=[1]))
        layer_2 = tf.matmul(layer_1, w_2) + b_2
    with tf.name_scope('Loss'):
        # Mean squared error.
        loss = tf.reduce_mean(tf.pow(layer_2 - y, 2))
    with tf.name_scope('Train'):
        train_op = tf.train.AdamOptimizer(learning_rate=3e-1).minimize(loss)
#@markdown - **Train the model**
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    time_start = time.time()
    for num in range(num_epoch):
        _, ls = sess.run([train_op, loss], feed_dict={x: X, y: Y})
        print_list = [num+1, ls]
        if (num+1) % 10 == 0 or num == 0:
            print('Epoch {0[0]}, loss: {0[1]:.4f}.'.format(print_list))
    # time_start = time.time()
    y_pre = sess.run(layer_2, feed_dict={x: x_pre})
    sess.close()  # redundant inside `with`
time_end = time.time()
t = time_end - time_start
print('Running time is: %.4f s.' % t)
#@markdown - **Prediction curve**
data_pre = np.c_[x_pre, y_pre]
DATA = [data, data_pre]
NAME = ['Training data', 'Fitting curve']
STYLE = ['*r', 'b']
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 6))
for dat, name, style in zip(DATA, NAME, STYLE):
    ax.plot(dat[:, 0], dat[:, 1], style, markersize=8, label=name)
ax.legend(loc='upper right', fontsize=14)
ax.tick_params(labelsize=14)
plt.show()
```
| github_jupyter |
Lecture 8<br>
Day: Wednesday<br>
Date: Oct 06th 2021
### Problem Solve:
```
import numpy as np
import os
import random
# Random score arrays; each component's value range matches its point total.
Quiz = np.random.randint(0,16,size =40)
print(Quiz)
Assignment = np.random.randint(0,51, size = 50)
print(Assignment)
Mid= np.random.randint(0,81, size= 80)
print(Mid)
Final = np.random.randint(0, 151, size = 150 )
print(Final)
# Rescale each component to its course weight.
Quiz_Sum = (Quiz/15) * 30
print(Quiz_Sum)
Final_Sum= (Final/150)* 40
print(Final_Sum)
# NOTE(review): Mid values go up to 80, so dividing by 10 gives scores up to
# 160 out of a 20-point weight; the divisor is probably meant to be 80.
Mid_Sum = (Mid / 10 )* 20
print(Mid_Sum)
Assignment_Sum = (Assignment / 50) * 30
print(Assignment_Sum)
# NOTE(review): the four arrays have lengths 50, 80, 150 and 40 — adding them
# raises ValueError because the shapes do not broadcast.
Final_Marks = Assignment_Sum + Mid_Sum+ Final_Sum + Quiz_Sum
print(Final_Marks)
import pandas as pd
# Create a basic Series:
x = pd.Series([1,2,3,4,5,6,7,8])
# the left side of the printed output is the index
# of the Series
print(x)
# we can also assign a custom index to a Series
data_new = [1,2,3,4]
index_new= ['a','b','c','d']
eye = pd.Series(data=data_new , index=index_new )
print(eye)
# There are different attributes;
# array functions can be applied to a Series as well.
# NOTE(review): `ser` is never defined, so the next five lines raise NameError.
ser.name
ser.index
ser.values
ser.sort_values()
ser.sort_index()
# Slicing values and printing data using a range as well
x = pd.Series([1,2,3,4,5,6,7,8])
x[1]
# when adding two Series, entries combine by matching index labels;
# with a single Series you can assign values through an index directly
# for One Series
x[3]=x[1]+ x[2]
x[3]
print(x)
# For Two Series:
x = pd.Series([1,2,3,4,5,6,7,8])
y = pd.Series([17,9,10,11,12,13,15,16])
z = x+y
# As you can see, values at the same index are summed.
print(z)
```
# Exercise
```
# Build the same data as a list, a dict and an ndarray, then wrap each in a
# Series. NOTE(review): my_dict maps 'a' to 222 while my_values starts with
# 223, so the dict-backed Series differs from the others in its first value.
my_index =['a','b','c']
my_values = [223,224,334]
my_dict = {'a':222, 'b':333, 'c':334}
my_arr = np.array(my_values)
pd.Series(my_values)
pd.Series(data= my_values, index= my_index)
pd.Series(my_arr)
pd.Series(my_dict)
```
1.2. Series attributes and indexing:
```
# A named Series with a string index.
s = pd.Series(data =[111,222,333,444], index= ['a','b','c','d'], name= 'MySeries')
s.index
s.name
s.dtype
s[1]          # positional lookup
s['a']        # label lookup
s[['a','d']]  # fancy indexing with a list of labels
# Arithmetic between two Series aligns entries by index label.
s1 = pd.Series(data=[1,2,3,4], index = ['d','b','c','a'])
s2 = pd.Series(data=[1,2,3,4], index = ['a','b','d','e'])
s1 + s2
s1-s2
s1* s2
s1/s2
2*s1
# Common reductions and sorts.
s1.sum()
s1.mean()
s1.median()
s1.max()
s1.std()
s1.sort_values()
s1.sort_index()
ser_height = pd.Series([165.3, 170.1, 175.0, 182.1, 168.0, 162.0, 155.2, 176.9, 178.5, 176.1, 167.1, 180.0, 162.2, 176.1, 158.2, 168.6, 169.2],name='height')
ser_height
# Convert centimetres to metres element-wise.
ser_height.apply(lambda x : x/100)
```
# Data Frame:
```
# It's a 2D array and it is the most-used pandas structure.
# A limitation of NumPy is that all of the data must share one data type;
# next we create a DataFrame, which does not have that restriction.
```
# Exercise 05
```
import pandas as pd
import numpy as np
import os
# Column-oriented dict: keys become column names, lists become column values.
data = {'Name': ['Jake','Jennifer', 'Rafay', 'Paul Walker', ' Jake Paul'], 'Age': [24,21,25,19,22], 'Gender': ['M','F','M','M','M']}
```
# this will print data into the dataframe
```
# Build the DataFrame from the dict above and display it.
df=pd.DataFrame(data)
df
```
### Reading data from DataFrame:
```
# Load the student list; header='infer' takes column names from the first row.
df=pd.read_csv('data_studentlist.csv', header = 'infer')
df.shape
df.size
df.ndim
df.index
type(df)
df.info()
df.describe()
df.head(6)
df.tail(5)
# Rename all columns to upper-case names.
df.columns = ['NAME', 'GENDER' , 'AGE', 'GRADE', 'ABSENCE', 'BLOODTYPE', 'HEIGHT', 'WEIGHT']
df.head(3)
# Column access: attribute style, list-of-columns style, and loc/iloc.
df.NAME
type(df.NAME)
df[['NAME']]
df.loc[:,'NAME']
df.loc[:,['NAME','GENDER']]
df.iloc[:,[0,1]]
header= df.columns
header
# Select columns via a boolean mask over the column index.
df.loc[:,(header =='NAME') | (header == 'GENDER')]
# This is a row.
df.loc[2]
df.loc[2:4]   # loc slicing includes the end label
df.iloc[2:4]  # iloc slicing excludes the end position
df.drop(columns=['NAME', 'GENDER'])
df.loc[:, (header!='NAME') & (header!='GENDER')]
# Row filtering by condition.
df[df.GENDER=='M']
df[df.GENDER=='F']
# Round-trip through CSV and Excel files.
df2 = df.drop(columns=['GRADE','ABSENCE'])
df2.to_csv('data_mine.csv',index=False)
df3 = pd.read_csv('data_mine.csv',encoding='latin1',header='infer')
df3.head(3)
dfx = pd.read_excel('StudentData.xlsx', sheet_name='Sheet1')
dfx.head(5)
dfx.to_excel('data_studentlist2.xlsx',sheet_name='NewSheet', index=False)
```
# Exercise 206
```
import pandas as pd
import numpy as np
import os
df = pd.read_csv('data_studentlist.csv', header='infer')
# Replace the columns (header).
df.columns = ['NAME', 'GENDER' , 'AGE', 'GRADE', 'ABSENCE', 'BLOODTYPE', 'HEIGHT', 'WEIGHT']
df.head(3)
# NOTE(review): the two lines below duplicate the rename/head above.
df.columns = ['NAME', 'GENDER' , 'AGE', 'GRADE', 'ABSENCE', 'BLOODTYPE', 'HEIGHT', 'WEIGHT']
df.head(3)
# BMI = weight(kg) / height(m)^2; the 10000 factor converts HEIGHT from cm.
df['BMI'] = 10000*df['WEIGHT']/df['HEIGHT']**2
df
df.head(5)
df.drop('BMI',axis=1)  # returns a copy; df itself still has BMI
df.head(5)
df.drop('BMI', axis=1, inplace=True)  # now actually removes the column
df.head(5)
# Split into two overlapping halves to demonstrate merge/concat.
df_left= df.loc[:,['NAME','AGE','GENDER','GRADE','ABSENCE']]
df_left_small=df_left.loc[:10]
df_left_small
df_right = df.loc[:,['NAME','BLOODTYPE','WEIGHT','HEIGHT']]
df_right_small = df_right.loc[7:,]
df_right_small
# NOTE(review): the three lines below duplicate the right-half block above.
df_right = df.loc[:,['NAME','BLOODTYPE','WEIGHT','HEIGHT']]
df_right_small = df_right.loc[7:,]
df_right_small
# The four SQL-style join types on NAME.
pd.merge(df_left_small,df_right_small,left_on='NAME', right_on = 'NAME', how='inner')
pd.merge(df_left_small,df_right_small,left_on='NAME', right_on = 'NAME', how='left')
pd.merge(df_left_small,df_right_small,left_on='NAME', right_on = 'NAME', how='right')
pd.merge(df_left_small,df_right_small,left_on='NAME', right_on = 'NAME', how='outer')
# Concatenation: row-wise (default) vs column-wise (axis=1).
pd.concat([df_left_small,df_right_small],sort=True)
pd.concat([df_left_small,df_right_small],axis=1,sort=True)
```
| github_jupyter |
```
# import project libraries
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the data and create pandas DataFrame.
df = pd.read_csv('./My_data/Food_enforcement_data.csv',encoding= 'unicode_escape')
# Exploring the summary of our DataFrame
df.info()
# get column names (kept as a quick reference before dropping unneeded columns)
df.columns
# To identify any missing data (null values) and deal with them.
df.isnull().sum()
from sklearn.preprocessing import LabelEncoder
# LabelEncoder is used when encoding categorical features to numerical.
label_encode = LabelEncoder()
# Label encoding is performed on the target class "Classification" because it is ordinal in nature
df["Classification"] = label_encode.fit_transform(df['Classification'].astype('str'))
# Source: https://maxhalford.github.io/blog/target-encoding-done-the-right-way/
def calc_smooth_mean(df1, df2, cat_name, target, weight):
    """Smoothed target (mean) encoding of the categorical column `cat_name`.

    Each category maps to a weighted blend of its own target mean and the
    global target mean: (count*cat_mean + weight*global_mean)/(count+weight),
    so rare categories are pulled toward the global mean.

    Args:
        df1: frame the encoding is fitted on (and mapped onto).
        df2: optional second frame to map with the same encoding, or None.
        cat_name: name of the categorical column.
        target: name of the numeric target column.
        weight: smoothing strength; larger pulls categories toward the global mean.

    Returns:
        The encoded Series for df1, or a (df1_series, df2_series) tuple when
        df2 is given.
    """
    # BUG FIX: the original computed all statistics from the global `df`,
    # silently ignoring the df1 argument; use df1 so the function actually
    # encodes the frame it is given.
    # Compute the global mean
    mean = df1[target].mean()
    # Compute the number of values and the mean of each group
    agg = df1.groupby(cat_name)[target].agg(['count', 'mean'])
    counts = agg['count']
    means = agg['mean']
    # Compute the "smoothed" means
    smooth = (counts * means + weight * mean) / (counts + weight)
    # Replace each value by the corresponding smoothed mean
    if df2 is None:
        return df1[cat_name].map(smooth)
    else:
        return df1[cat_name].map(smooth),df2[cat_name].map(smooth.to_dict())
WEIGHT = 5  # smoothing strength for the target encoding
df['Recall_Reason'] = calc_smooth_mean(df1=df, df2=None, cat_name='Recall_Reason', target='Classification', weight=WEIGHT)
df['Product_Description'] = calc_smooth_mean(df1=df, df2=None, cat_name='Product_Description', target='Classification', weight=WEIGHT)
import category_encoders as ce
# here we are encoding the "Classification" column, which has ordinal data
# (Class I, Class II, Class III), into one indicator column per class
encode = ce.OneHotEncoder(cols='Classification',handle_unknown='return_nan',return_df=True,use_cat_names=True)
data_encoded = encode.fit_transform(df)
# Here we are encoding the 'Recalling_Firm_City' column using the binary encoding technique
encode_0 = ce.BinaryEncoder(cols=['Recalling_Firm_City'],return_df=True)
data_encoded = encode_0.fit_transform(data_encoded)
# encoding the "Status" column using the one-hot encoding method
enoder_1 = ce.OneHotEncoder(cols='Status',handle_unknown='return_nan',return_df=True,use_cat_names=True)
# Transforming the encoded data.
data_encoded = enoder_1.fit_transform(data_encoded)
# Here we are encoding the 'Recalling_Firm_Country' column using the binary encoding technique
encoder_2 = ce.BinaryEncoder(cols=['Recalling_Firm_Country'],return_df=True)
# Transforming the encoded data.
data_encoded = encoder_2.fit_transform(data_encoded)
# drop unwanted columns
data_encoded = data_encoded.drop('Recalling_Firm_State',axis=1)
# Dropping all columns deemed unnecessary for the model's performance
data_encoded = data_encoded.drop('Product_Type',axis=1)
data_encoded = data_encoded.drop('Recalling_Firm',axis=1)
data_encoded = data_encoded.drop('Product ID',axis=1)
data_encoded = data_encoded.drop('FEI_Number',axis=1)
data_encoded = data_encoded.drop('Classification_Date',axis=1)
# First 23 columns are the features; the remaining (one-hot Classification)
# columns are the target.
X = data_encoded.iloc[:,:23].values
y = data_encoded.iloc[:,23:].values
from sklearn.model_selection import train_test_split
# Training/test split (75/25)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Feature scaling: fit on the training split only, then apply to the test split.
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Import model frameworks and libraries
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.callbacks import EarlyStopping
# the model takes in 15507 records with 23 input features
model = Sequential()
model.add(Dense(24, input_dim=23, activation='relu', # rectified linear activation function is the function of choice
                kernel_initializer='random_normal')) # initializer that generates tensors with a normal distribution
model.add(Dense(12,activation='relu',kernel_initializer='random_normal'))
model.add(Dense(6,activation='relu',kernel_initializer='random_normal'))
# softmax is the activation of choice for multiclass classification problems
model.add(Dense(3,activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics =['accuracy'])
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,
                        verbose=1, mode='auto', restore_best_weights=True)
# model fitting
# when training the model, a Keras callback (early stopping) prevents overfitting
model.fit(X_train,y_train,validation_data=(X_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
# create a data frame of model metrics, i.e. losses and accuracy
df_loss = pd.DataFrame(model.history.history)
df_loss
# plotting loss vs validation loss and accuracy vs validation accuracy
df_loss.plot()
# predict unseen test data
pred = model.predict(X_test)
pred = np.argmax(pred,axis=1)
from sklearn import metrics
y_compare = np.argmax(y_test,axis=1)
score = metrics.accuracy_score(y_compare, pred)
print("Accuracy score: {}".format(score))
def plot_confusion_matrix(cm, names, title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render confusion matrix `cm` as a heat map, labelling both axes with
    the class names in `names`."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # One tick per class, rotated on the x-axis so long labels stay readable.
    positions = np.arange(len(names))
    plt.xticks(positions, names, rotation=45)
    plt.yticks(positions, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Compute confusion matrix
My_products = pd.DataFrame(data_encoded.iloc[:,23:])
products = My_products.columns
cm = confusion_matrix(y_compare, pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm, products)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, products,
title='Normalized confusion matrix')
plt.show()
pred[:30]
# Evaluate the model performance
model.evaluate(X_test,y_test,verbose=0)
epochs = len(df_loss)
# here we will scale the whole dataset without spliting
scaled_X = scaler.fit_transform(X)
# Second model but this time with all the 20677 dataset record (no splitting)
model = Sequential()
model.add(Dense(X.shape[1],input_dim=23, activation='relu',
kernel_initializer='random_normal'))
model.add(Dense(12,activation='relu',kernel_initializer='random_normal'))
model.add(Dense(6,activation='relu',kernel_initializer='random_normal'))
model.add(Dense(3,activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics =['accuracy'])
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,
verbose=1, mode='auto', restore_best_weights=True)
model.fit(scaled_X,y,validation_data=(X_test,y_test),callbacks=[monitor],verbose=2,epochs=500)
from tensorflow.keras.models import load_model
# Saving the model
# NOTE(review): "racall" in the filename looks like a typo for "recall" —
# left unchanged so any downstream loader referencing this exact name works.
model.save('Food_racall_draft.h5')
# Predict on the full dataset and collapse softmax outputs to class indices
predictions = model.predict(X)
predictions = np.argmax(predictions,axis=1)
import joblib
# Save the scaler
# NOTE(review): ".pki" is likely a typo for ".pkl"; kept unchanged for the
# same reason as the model filename above.
joblib.dump(scaler, 'food_recall_scaler.pki')
# This list will hold all products classified as class I or II.
Class_I_and_II_products = []
# In this prediction model the predicted items are at the same index as they
# were in the original dataframe: if product id 184301 was at position 44 in
# `df` then its prediction is at position 44 in `predictions`.
for index in range(df.shape[0]):
    # Bug fix: the original condition used the bitwise `|` operator, which
    # binds tighter than `==` (`a == 0 | a == 1` parses as
    # `a == (0 | a) == 1`), so the intended "class 0 or class 1" test was
    # never evaluated. Membership in (0, 1) expresses it correctly.
    if predictions[index] in (0, 1):
        # Record the product id of each Class I / Class II prediction.
        Class_I_and_II_products.append(df.loc[index, ['Product ID']][0])
# A list of products classified as Class I and II with potential to be withdrawn.
Class_I_and_II_products[:20]
# Determine which countries' recalled products are classified as Class I or II.
# Bug fix: replaced the bitwise-`|` condition (which binds tighter than `==`
# and therefore never tested "class 0 or class 1") with membership in (0, 1).
violating_Countries = [df.loc[index, ['Recalling_Firm_Country']][0]
                       for index in range(df.shape[0])
                       if predictions[index] in (0, 1)]
def High_Violating_countries(Country_list):
    """Count how often each country appears in ``Country_list``.

    Parameters
    ----------
    Country_list : list of str
        Country names, one entry per violating product.

    Returns
    -------
    dict
        Mapping of country -> occurrence count, ordered from most to least
        frequent.
    """
    # Bug fixes vs. the original:
    #  * counts were taken from the global `violating_Countries` instead of
    #    the `Country_list` parameter, so the function only worked when called
    #    with that exact global list;
    #  * `sorted(...)` built a sorted list of items and discarded it — the
    #    sorted order is now actually applied to the returned dict.
    country_dictionary = {}
    for country in Country_list:
        if country not in country_dictionary:
            country_dictionary[country] = Country_list.count(country)
    return dict(sorted(country_dictionary.items(),
                       key=lambda item: item[1], reverse=True))
# Per-country violation counts (note: the variable name's "riask" is a typo,
# kept because later cells may reference it).
high_riask_countries = High_Violating_countries(violating_Countries)
# Print countries that appear at least five times in the violation list.
for key, value in high_riask_countries.items():
if value >= 5:
print("{:>10} has {:>5} food products in Violation ".format(key, value))
```
| github_jupyter |
# Machine Learning application: Forecasting wind power. Using alternative energy for social & enviromental Good
<table>
<tr><td>
<img src="https://github.com/dmatrix/mlflow-workshop-part-3/raw/master/images/wind_farm.jpg"
alt="Keras NN Model as Logistic regression" width="800">
</td></tr>
</table>
In this notebook, we will use the MLflow Model Registry to build a machine learning application that forecasts the daily power output of a [wind farm](https://en.wikipedia.org/wiki/Wind_farm).
Wind farm power output depends on weather conditions: generally, more energy is produced at higher wind speeds. Accordingly, the machine learning models used in the notebook predicts power output based on weather forecasts with three features: `wind direction`, `wind speed`, and `air temperature`.
* This notebook uses altered data from the [National WIND Toolkit dataset](https://www.nrel.gov/grid/wind-toolkit.html) provided by NREL, which is publicly available and cited as follows:*
* Draxl, C., B.M. Hodge, A. Clifton, and J. McCaa. 2015. Overview and Meteorological Validation of the Wind Integration National Dataset Toolkit (Technical Report, NREL/TP-5000-61740). Golden, CO: National Renewable Energy Laboratory.*
* Draxl, C., B.M. Hodge, A. Clifton, and J. McCaa. 2015. "The Wind Integration National Dataset (WIND) Toolkit." Applied Energy 151: 355366.*
* Lieberman-Cribbin, W., C. Draxl, and A. Clifton. 2014. Guide to Using the WIND Toolkit Validation Code (Technical Report, NREL/TP-5000-62595). Golden, CO: National Renewable Energy Laboratory.*
* King, J., A. Clifton, and B.M. Hodge. 2014. Validation of Power Output for the WIND Toolkit (Technical Report, NREL/TP-5D00-61714). Golden, CO: National Renewable Energy Laboratory.*
Google's DeepMind published an [AI for Social Good: 7 Inspiring Examples](https://www.springboard.com/blog/ai-for-good/) blog. One of the examples was
how wind farms can predict expected power output based on wind conditions and temperature, hence mitigating the burden of consuming
energy from fossil fuels.
<table>
<tr><td>
<img src="https://github.com/dmatrix/ds4g-workshop/raw/master/notebooks/images/deepmind_system-windpower.gif"
alt="Deep Mind ML Wind Power" width="400">
<img src="https://github.com/dmatrix/ds4g-workshop/raw/master/notebooks/images/machine_learning-value_wind_energy.max-1000x1000.png"
alt="Deep Mind ML Wind Power" width="400">
</td></tr>
</table>
```
import warnings
warnings.filterwarnings("ignore")
import mlflow
mlflow.__version__
```
## Run some class and utility notebooks
This defines and allows us to use some Python model classes and utility functions
```
%run ./rfr_class.ipynb
%run ./utils_class.ipynb
```
## Load our training data
Ideally, you would load it from a Feature Store or Delta Lake table
```
# Load and print dataset
csv_path = "https://raw.githubusercontent.com/dmatrix/olt-mlflow/master/model_registery/notebooks/data/windfarm_data.csv"
# Use column 0 (date) as the index
wind_farm_data = Utils.load_data(csv_path, index_col=0)
wind_farm_data.head(5)
```
## Get Training and Validation data
```
X_train, y_train = Utils.get_training_data(wind_farm_data)
val_x, val_y = Utils.get_validation_data(wind_farm_data)
```
## Initialize a set of hyperparameters for the training and try three runs
```
# Initialize our model hyperparameters
params_list = [{"n_estimators": 100},
{"n_estimators": 200},
{"n_estimators": 300}]
mlflow.set_tracking_uri("sqlite:///mlruns.db")
model_name = "WindfarmPowerForecastingModel"
for params in params_list:
rfr = RFRModel.new_instance(params)
print("Using paramerts={}".format(params))
runID = rfr.mlflow_run(X_train, y_train, val_x, val_y, model_name, register=True)
print("MLflow run_id={} completed with MSE={} and RMSE={}".format(runID, rfr.mse, rfr.rsme))
```
## Let's Examine the MLflow UI
1. Let's examine some models and start comparing their metrics
2. **mlflow ui --backend-store-uri sqlite:///mlruns.db**
# Integrating Model Registry with CI/CD Forecasting Application
<table>
<tr><td>
<img src="https://github.com/dmatrix/mlflow-workshop-part-3/raw/master/images/forecast_app.png"
alt="Keras NN Model as Logistic regression" width="800">
</td></tr>
</table>
1. Use the model registry fetch different versions of the model
2. Score the model
3. Select the best scored model
4. Promote model to production, after testing
# Define a helper function to load PyFunc model from the registry
<table>
<tr><td> Save a Built-in MLflow Model Flavor and load as PyFunc Flavor</td></tr>
<tr><td>
<img src="https://raw.githubusercontent.com/dmatrix/mlflow-workshop-part-2/master/images/models_2.png"
alt="" width="600">
</td></tr>
</table>
```
def score_model(data, model_uri):
    """Load the registered model at ``model_uri`` as a PyFunc flavor and
    return its predictions for ``data``."""
    loaded = mlflow.pyfunc.load_model(model_uri)
    predictions = loaded.predict(data)
    return predictions
```
## Load scoring data
Again, ideally you would load it from on-line or off-line FeatureStore
```
# Load the score data
score_path = "https://raw.githubusercontent.com/dmatrix/olt-mlflow/master/model_registery/notebooks/data/score_windfarm_data.csv"
score_df = Utils.load_data(score_path, index_col=0)
score_df.head()
# Drop the power column since we are predicting that value
actual_power = pd.DataFrame(score_df.power.values, columns=['power'])
score = score_df.drop("power", axis=1)
```
## Score the version 1 of the model
```
# Formulate the model URI to fetch from the model registery
model_uri = "models:/{}/{}".format(model_name, 1)
# Predict the Power output
pred_1 = pd.DataFrame(score_model(score, model_uri), columns=["predicted_1"])
pred_1
```
#### Combine with the actual power
```
actual_power["predicted_1"] = pred_1["predicted_1"]
actual_power
```
## Score the version 2 of the model
```
# Formulate the model URI to fetch from the model registery
model_uri = "models:/{}/{}".format(model_name, 2)
# Predict the Power output
pred_2 = pd.DataFrame(score_model(score, model_uri), columns=["predicted_2"])
pred_2
```
#### Combine with the actual power
```
actual_power["predicted_2"] = pred_2["predicted_2"]
actual_power
```
## Score the version 3 of the model
```
# Formulate the model URI to fetch version 3 from the model registry
model_uri = "models:/{}/{}".format(model_name, 3)
# Predict the Power output with version 3 of the model
pred_3 = pd.DataFrame(score_model(score, model_uri), columns=["predicted_3"])
pred_3
```
#### Combine the values into a single pandas DataFrame
```
actual_power["predicted_3"] = pred_3["predicted_3"]
actual_power
```
## Plot the combined predicited results vs the actual power
```
%matplotlib inline
actual_power.plot.line()
```
| github_jupyter |
# Clone the repo
```
# # Clone the entire repo.
# !git clone -b master --single-branch https://github.com/NewLuminous/Zalo-Vietnamese-Wiki-QA.git zaloqa
# %cd zaloqa
```
# Install & load libraries
```
import modeling
import evaluation
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# To reload a module while in the interactive mode
import importlib
importlib.reload(modeling)
```
# Load data
```
from utils import data_loading
zalo_data = data_loading.load(['zaloai'])
zalo_data
```
# Train & evaluate
```
from sklearn.model_selection import train_test_split
# Features are every column except the target label.
X = zalo_data.drop(columns=['label'])
y = zalo_data['label']
# Hold out 10% of the data for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state=42)
```
## LogisticRegression + CountVectorizer
```
model = modeling.get_model('logit')(vectorizer='bow-ngram', random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
## LogisticRegression + TfidfVectorizer
```
model = modeling.get_model('logit')(vectorizer='tfidf', random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
## LogisticRegression + Keras's Embedding
```
model = modeling.get_model('logit-embedding')()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
## LogisticRegression + Word2Vec
```
model = modeling.get_model('logit')(vectorizer='word2vec', random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
## CRNN
```
model = modeling.get_model('crnn')()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
## CRNN + Attention
```
model = modeling.get_model('crnn-attention')()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy:', evaluation.get_accuracy(y_test, y_pred))
evaluation.print_classification_report(y_test, y_pred)
evaluation.plot_confusion_matrix(y_test, y_pred)
```
| github_jupyter |
<!--NAVIGATION-->
<!--NAVIGATION-->
<!-- markdownlint-disable -->
<h2 align="center" style="font-family:verdana;font-size:150%"> <b>S</b>equencing <b>A</b>nalysis and <b>D</b>ata Library for <b>I</b>mmunoinformatics <b>E</b>xploration <br><br>Demonstration for AIRR-C 2022</h2>
<div align="center">
<img src="https://sadiestaticcrm.s3.us-west-2.amazonaws.com/Sadie.svg" alt="SADIE" style="margin:0.2em;width:50%">
</div>
<br>
<a href="https://colab.research.google.com/github/jwillis0720/sadie/blob/airr_c/notebooks/airr_c/SADIE_DEMO.ipynb"><img align="center" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
# Setup
Here we will setup our files for the demo. If you are running the notebook locally, these files don't need to be pulled from the repository
```
# Install SADIE plus plotting dependencies (used on Google Colab only).
def install_packages() -> None:
!pip -q install git+https://github.com/jwillis0720/sadie.git
!pip -q install seaborn matplotlib
# Download and unpack the demo input files (used on Google Colab only).
def get_demo_files() -> None:
"""Get the demonstration files for AIRR-C 2022"""
!wget -q -O input.tgz https://github.com/jwillis0720/sadie/raw/airr_c/notebooks/airr_c/input.tgz
!tar -xf input.tgz
import sys
# On Colab we must install packages and fetch demo inputs; locally we only
# load the lab_black auto-formatter (input files are assumed to be present).
if "google.colab" in sys.modules:
install_packages()
get_demo_files()
else:
%load_ext lab_black
```
# 1. Low Level
First, let's start at a very low level. These are Pythonic objects that model the data we expect in an AIRR-compliant data format. They are divided by [AIRR 1.3 Rearrangement category](https://docs.airr-community.org/en/stable/datarep/rearrangements.html)
* Input Sequence
* Primary Annotations
* Alignment Annotations
* Alignment Positions
* RegionSequences
* RegionPositions
All of these are combined as a `Receptor Chain` Object.
Now let's take a look how a person interested in low level programming could use these objects
## First Model - Input Sequence
```
from sadie.receptor.rearrangment import InputSequence
from Bio import SeqIO
from pprint import pprint
vrc01_heavy_sequecne = SeqIO.read("input/vrc01_heavy.fasta", "fasta")
# make an input sequence model
input_sequence_model = InputSequence(
sequence_id=vrc01_heavy_sequecne.name,
sequence=vrc01_heavy_sequecne.seq,
raw_sequence=vrc01_heavy_sequecne.seq,
)
# Print out dictionary to see
pprint(input_sequence_model.__dict__)
```
## Second Model - Primary Annotations
```
from sadie.receptor.rearrangment import PrimaryAnnotations
# make a primary sequence model
primary_sequence_annotation_model = PrimaryAnnotations(
rev_comp=False,
productive=True,
vj_in_frame=True,
stop_codon=False,
complete_vdj=True,
locus="IGH",
v_call="IGHV1-2*02",
d_call=["IGHD3-16*01", "IGHD3-16*02"],
j_call="IGHJ1*01",
v_call_top="IGHV1-2*02",
d_call_top="IGHD3-16*01",
j_call_top="IGHJ1*01",
c_call="IGHG1*01",
)
# pretty print the dictionary attribute
pprint(primary_sequence_annotation_model.__dict__)
```
## Alignment Annotations
```
from sadie.receptor.rearrangment import AlignmentAnnotations
# Model 3 - Alignment Annotations
alignment_annotations_model = AlignmentAnnotations(
sequence_alignment="CAGGTGCAGCTGGTGCAGTCTGGGGGTCAGATGAAGAAGCCTGGCGAGTCGATGAGAATTTCTTGTCGGGCTTCTGGATATGAATTTATTGATTGTACGCTAAATTGGATTCGTCTGGCCCCCGGAAAAAGGCCTGAGTGGATGGGATGGCTGAAGCCTCGGGGGGGGGCCGTCAACTACGCACGTCCACTTCAGGGCAGAGTGACCATGACTCGAGACGTTTATTCCGACACAGCCTTTTTGGAGCTGCGCTCGTTGACAGTAGACGACACGGCCGTCTACTTTTGTACTAGGGGAAAAAACTGTGATTACAATTGGGACTTCGAACACTGGGGCCGGGGCACCCCGGTCATCGTCTCATCAG",
sequence_alignment_aa="QVQLVQSGGQMKKPGESMRISCRASGYEFIDCTLNWIRLAPGKRPEWMGWLKPRGGAVNYARPLQGRVTMTRDVYSDTAFLELRSLTVDDTAVYFCTRGKNCDYNWDFEHWGRGTPVIVSS",
germline_alignment="CAGGTGCAGCTGGTGCAGTCTGGGGCTGAGGTGAAGAAGCCTGGGGCCTCAGTGAAGGTCTCCTGCAAGGCTTCTGGATACACCTTCACCGGCTACTATATGCACTGGGTGCGACAGGCCCCTGGACAAGGGCTTGAGTGGATGGGATGGATCAACCCTAACAGTGGTGGCACAAACTATGCACAGAAGTTTCAGGGCAGGGTCACCATGACCAGGGACACGTCCATCAGCACAGCCTACATGGAGCTGAGCAGGCTGAGATCTGACGACACGGCCGTGTATTACTGTGCGAGNNNNNNNNNNNNTGATTACGTTTGGGACTTCCAGCACTGGGGCCAGGGCACCCTGGTCACCGTCTCCTCAG",
germline_alignment_aa="QVQLVQSGAEVKKPGASVKVSCKASGYTFTGYYMHWVRQAPGQGLEWMGWINPNSGGTNYAQKFQGRVTMTRDTSISTAYMELSRLRSDDTAVYYCAXXXXXDYVWDFQHWGQGTLVTVSS",
v_score=168.2,
d_score=17.8,
j_score=52.6,
v_identity=0.6825,
d_identity=0.85,
j_identity=0.86,
v_cigar="6S293M76S3N",
d_cigar="311S6N14M50S17N",
j_cigar="325S7N45M5S",
v_support=6.796e-44,
d_support=0.5755,
j_support=5.727e-11,
junction="TGTACTAGGGGAAAAAACTGTGATTACAATTGGGACTTCGAACACTGG",
junction_aa="CTRGKNCDYNWDFEHW",
np1="GGGAAAAAACTG",
c_score=100,
c_identity=1,
c_support=1e-44,
c_cigar="6S293M76S3N",
)
# alignment_sequence_annotation_model = AlignmentAnnotations(**alignment_dict)
pprint(alignment_annotations_model.__dict__)
```
# Optional but recommended models
## AlignmentPositions
```
from sadie.receptor.rearrangment import AlignmentPositions
alignment_positions_dict = dict(
v_sequence_start=7,
v_sequence_end=299,
v_germline_start=1,
v_germline_end=293,
v_alignment_start=1,
v_alignment_end=293,
d_sequence_start=312,
d_sequence_end=325,
d_germline_start=7,
d_germline_end=20,
d_alignment_start=306,
d_alignment_end=319,
j_sequence_start=326,
j_sequence_end=370,
j_germline_start=8,
j_germline_end=52,
j_alignment_start=320,
j_alignment_end=364,
)
alignment_positions_model = AlignmentPositions(**alignment_positions_dict)
# pretty print dictonary
pprint(alignment_positions_model.__dict__)
```
## RegionSequences
```
from sadie.receptor.rearrangment import RegionSequences
region_sequence_dict = dict(
fwr="CAGGTGCAGCTGGTGCAGTCTGGGGGTCAGATGAAGAAGCCTGGCGAGTCGATGAGAATTTCTTGTCGGGCTTCT",
fwr1_aa="QVQLVQSGGQMKKPGESMRISCRAS",
cdr1="GGATATGAATTTATTGATTGTACG",
cdr1_aa="GYEFIDCT",
fwr2="CTAAATTGGATTCGTCTGGCCCCCGGAAAAAGGCCTGAGTGGATGGGATGG",
fwr2_aa="LNWIRLAPGKRPEWMGW",
cdr2="CTGAAGCCTCGGGGGGGGGCCGTC",
cdr2_aa="LKPRGGAV",
fwr3="AACTACGCACGTCCACTTCAGGGCAGAGTGACCATGACTCGAGACGTTTATTCCGACACAGCCTTTTTGGAGCTGCGCTCGTTGACAGTAGACGACACGGCCGTCTACTTTTGT",
fwr3_aa="NYARPLQGRVTMTRDVYSDTAFLELRSLTVDDTAVYFC",
cdr3="ACTAGGGGAAAAAACTGTGATTACAATTGGGACTTCGAACAC",
cdr3_aa="TRGKNCDYNWDFEH",
fwr4="TGGGGCCGGGGCACCCCGGTCATCGTCTCATCA",
fwr4_aa="WGRGTPVIVSS",
)
region_sequence_model = RegionSequences(**region_sequence_dict)
pprint(region_sequence_model.__dict__)
from sadie.receptor.rearrangment import RegionPositions
region_positions_dict = dict(
fwr1_start=7,
fwr1_end=81,
cdr1_start=82,
cdr1_end=105,
fwr2_start=106,
fwr2_end=156,
cdr2_start=157,
cdr2_end=180,
fwr3_start=181,
fwr3_end=294,
cdr3_start=295,
cdr3_end=336,
fwr4_start=337,
fwr4_end=369,
)
region_position_model = RegionPositions(**region_positions_dict)
pprint(region_position_model.__dict__)
```
# Junction Lengths
```
from sadie.receptor.rearrangment import JunctionLengths
junction_length_dict = dict(
junction_length=48,
junction_aa_length=None,
np1_length=None,
np2_length=None,
np3_length=None,
n1_length=None,
n2_length=None,
n3_length=None,
p3v_length=None,
p5d_length=None,
p3d_length=None,
p5d2_length=None,
p3d2_length=None,
p5j_length=None,
)
junction_length_model = JunctionLengths(**junction_length_dict)
pprint(junction_length_model.__dict__)
```
## ReceptorChain
All of those annotations can now be [composed](https://www.youtube.com/watch?v=0mcP8ZpUR38) into a ReceptorChain model
```
from sadie.receptor.rearrangment import ReceptorChain
receptor_chain = ReceptorChain(
input_sequence=input_sequence_model,
primary_annotations=primary_sequence_annotation_model,
alignment_annotations=alignment_annotations_model,
alignment_positions=alignment_positions_model,
region_sequences=region_sequence_model,
region_positions=region_sequence_model,
junction_lengths=junction_length_model,
)
print(receptor_chain)
```
# 2. Mid-level
Okay, but maybe you don't even care about composing low-level objects. You just have a sequence without the proper annotations. You can use convenience methods to quickly fill in the annotations in the model. How does it align and annotate? More on that later
```
receptor_chain = ReceptorChain.from_single("vrc01_heavy", vrc01_heavy_sequecne.seq)
# Same as before but from the `from_single` method
print(receptor_chain)
```
<h1><u> Using the SADIE AIRR module:</u></h1>
SADIE AIRR will annotate sequences, verify fields, and return an AirrTable. The AirrTable is a subclass of a pandas dataframe so anything you can do on pandas, you can do on an AirrTable.
There are a variety of databases that ship with SADIE:
<u>From IMGT</u>
- CLK
- Dog
- Human
- Mouse
- Rabbit
- Rat
<u> Custom </u>
- Macaque
```
def plot_v_genes(df_one, df_two, colors=("red", "blue")):
    """Plot side-by-side bar charts of top V-gene usage for two AIRR tables.

    Parameters
    ----------
    df_one, df_two : pandas.DataFrame
        AIRR tables with a ``v_call_top`` column (allele calls like
        ``IGHV1-2*02``); the allele suffix after ``*`` is stripped before
        counting.
    colors : sequence of two colors
        Bar colour for each panel.

    Fix: the default for ``colors`` was a mutable list; it is only read,
    but a tuple default avoids the shared-mutable-default pitfall.
    """
    fig, axes = plt.subplots(1, 2, figsize=(15, 3))
    for df, axis, color in zip([df_one, df_two], axes, colors):
        # Drop the allele suffix ("IGHV1-2*02" -> "IGHV1-2") and count genes.
        df["v_call_top"].str.split("*").str.get(0).value_counts().plot(
            kind="bar", color=color, ax=axis
        )
        axis.set_ylabel("Counts")
    sns.despine()
from sadie.airr import Airr
import seaborn as sns
from matplotlib import pyplot as plt
import logging
# Surface INFO-level log messages so Airr reports its progress
logger = logging.getLogger()
logger.setLevel("INFO")
# Annotate the CATNAP heavy- and light-chain FASTA files against the human
# IMGT database. NOTE(review): `adaptable` presumably enables a fallback
# annotation strategy — confirm against the SADIE documentation.
airr_api_human = Airr("human", database="imgt", adaptable=True)
catnap_heavy_base = airr_api_human.run_fasta("input/catnap_nt_heavy_sub.fasta")
catnap_light_base = airr_api_human.run_fasta("input/catnap_nt_light_sub.fasta")
from sadie.airr.airrtable import LinkedAirrTable
# Link each heavy chain with its light chain on the shared sequence id.
# Bug fix: the original merged the heavy-chain table with itself; the
# "_heavy"/"_light" suffixes only make sense when joining heavy with light.
catnap_merged = LinkedAirrTable(
    catnap_heavy_base.merge(
        catnap_light_base, on="sequence_id", how="inner", suffixes=["_heavy", "_light"]
    )
)
# make a pretty plot of the V gene usage
plot_v_genes(catnap_heavy_base, catnap_light_base)
```
## Alternate species
Okay, but what about a different species. Let's try the mouse repertoire as identified by IMGT
```
# Annotate the same CATNAP sequences against the mouse IMGT database.
airr_api_mouse = Airr("mouse", database="imgt", adaptable=False)
catnap_heavy_mouse = airr_api_mouse.run_fasta("input/catnap_nt_heavy_sub.fasta")
catnap_light_mouse = airr_api_mouse.run_fasta("input/catnap_nt_light_sub.fasta")
# Bug fix: the original passed the heavy-chain table twice, so the
# light-chain panel duplicated the heavy-chain plot.
plot_v_genes(catnap_heavy_mouse, catnap_light_mouse)
```
## Custom Databases - How about Watson/Karlsson-Hedestam?
In this instance, instead of calling things from IMGT, let's use a custom database we have in [G3](https://g3.jordanrwillis.com/docs)
```
# Annotate against the custom macaque database served from G3.
airr_api_macaque = Airr("macaque", database="custom", adaptable=False)
catnap_heavy_macaque = airr_api_macaque.run_fasta("input/catnap_nt_heavy_sub.fasta")
catnap_light_macaque = airr_api_macaque.run_fasta("input/catnap_nt_light_sub.fasta")
# Bug fix: the original passed the heavy-chain table twice, so the
# light-chain panel duplicated the heavy-chain plot.
plot_v_genes(catnap_heavy_macaque, catnap_light_macaque)
```
<h1><u> Using the SADIE Reference module:</u></h1>
SADIE uses a reference database. It uses a real-time web API called the *G*ermline *G*ene *G*ateway, which provides real-time, curated genes available via a RESTful API that conforms to [OpenAPI standards](https://swagger.io/specification/)
[Let's take a look at the reference database](https://g3.jordanrwillis.com/docs)
Since it's RESTful, we can gather database information programmatically in real time!
```
import requests
import pandas as pd
# We can just query our gene database progrmatically...this is super handy if you are changing reference databases on the fly
results_json = requests.get(
"https://g3.jordanrwillis.com/api/v1/genes?source=imgt&common=human&segment=V&limit=3"
).json()
# turn the JSON into a dataframe
results_df = pd.json_normalize(results_json)
results_df
```
## Using reference objects to make custom/altered reference databases
```
import tempfile
from sadie.reference import Reference
# create empty reference object
reference = Reference()
# Add Genes one at a time, right in the program
reference.add_gene(
{
"species": "custom",
"sub_species": "human",
"gene": "IGHV1-2*01",
"database": "imgt",
}
)
reference.add_gene(
{
"species": "custom",
"sub_species": "human",
"gene": "IGHV3-15*01",
"database": "imgt",
}
)
reference.add_gene(
{
"species": "custom",
"sub_species": "human",
"gene": "IGHJ6*01",
"database": "imgt",
}
)
reference.add_gene(
{
"species": "custom",
"sub_species": "mouse",
"gene": "IGKJ5*01",
"database": "imgt",
}
)
reference.add_gene(
{
"species": "custom",
"sub_species": "mouse",
"gene": "IGKV10-96*04",
"database": "imgt",
}
)
reference.add_gene(
{
"species": "custom",
"sub_species": "human",
"gene": "IGHD3-3*01",
"database": "imgt",
}
)
# Add a mouse gene in humans!
reference.add_gene(
{
"species": "custom",
"sub_species": "mouse",
"gene": "IGHV1-11*01",
"database": "imgt",
}
)
logger.setLevel("WARNING")
reference.get_dataframe()
# When we setup the API we can now pass a reference object we built
custom_airr_api = Airr(reference)
# our custom results
output_custom_results_heavy = custom_airr_api.run_fasta(
"input/catnap_nt_heavy_sub.fasta"
)
output_custom_results_light = custom_airr_api.run_fasta(
"input/catnap_nt_light_sub.fasta"
)
plot_v_genes(output_custom_results_heavy, output_custom_results_light)
```
# SADIE Numbering for AA sequences
Inspired by ANARCI, we can also renumber AA sequences in the following schemes:
* Kabat
* Chothia
* IMGT
* Martin
* Aho
And be able to delineate CDR boundaries from
* Kabat
* Chothia
* IMGT
* SCDR
```
from sadie.hmmer import HMMER
# setup numbering api
hmmer_numbering_api = HMMER("imgt", "imgt")
results = hmmer_numbering_api.run_dataframe(catnap_heavy_base, "sequence_id", "vdj_aa")
results_imgt = results.drop(["domain_no", "hmm_species", "score"], axis=1).rename(
{"Id": "sequence_id"}, axis=1
)
results_imgt
# Kabat numbering with Chothia boundaries
hmmer_numbering_api = HMMER("kabat", "chothia")
results = hmmer_numbering_api.run_dataframe(catnap_heavy_base, "sequence_id", "vdj_aa")
chothia_results = results.drop(["domain_no", "hmm_species", "score"], axis=1)
alignment_numbering = chothia_results.get_alignment_table()
alignment_numbering
```
Now it's super easy to change your sequencing data into a one hot vector for ML training
```
one_hot_encoded = pd.get_dummies(alignment_numbering.iloc[:, 3:])
chothia_results["Id"].to_frame().join(one_hot_encoded).reset_index(drop=True)
```
# Sadie Mutational analysis
These methods can be used at a higher level to give specific mutations given a numbering scheme
```
from sadie.airr.methods import run_mutational_analysis
catnap_heavy_with_mutations = run_mutational_analysis(catnap_heavy_base, "kabat")
```
# Sadie Clustering
And finally, we can use an agglomerative clustering approach (inspired from Briney Clonify)
```
from sadie.cluster import Cluster
cluster_api = Cluster(
catnap_heavy_with_mutations,
lookup=["cdr1_aa", "cdr2_aa", "cdr3_aa"],
pad_somatic=True,
)
cluster_df = cluster_api.cluster(6)
distance_frame = cluster_api.distance_df
from scipy.cluster import hierarchy as hc
from scipy.spatial.distance import squareform
distance_frame = cluster_api.distance_df
total_clusters = list(cluster_df["cluster"].unique())
clustuer_pal = sns.husl_palette(len(total_clusters), s=2)
cluster_lut = dict(zip(map(int, total_clusters), clustuer_pal))
row_colors = pd.DataFrame(cluster_df)["cluster"].apply(lambda x: cluster_lut[x])
linkage = hc.linkage(squareform(distance_frame), method="average")
g = sns.clustermap(
distance_frame,
method="complete",
row_linkage=linkage,
col_linkage=linkage,
row_colors=row_colors.to_numpy(),
col_colors=row_colors.to_numpy(),
dendrogram_ratio=(0.1, 0.1),
cbar_pos=(1, 0.32, 0.03, 0.2),
# linewidths=0.1,
figsize=(7.6 * 0.9, 7.6 * 0.9),
tree_kws={"linewidths": 1},
)
```
# High level - command line apps
but what if you just want to use a command line app. We got you covered
```
!sadie airr -s human --skip-mutation input/catnap_nt_heavy_sub.fasta test.tsv
pd.read_csv("test.tsv", delimiter="\t", index_col=0)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Text classification with TensorFlow Hub: Movie reviews
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/text_classification_with_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem.
The tutorial demonstrates the basic application of transfer learning with TensorFlow Hub and Keras.
We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews.
This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow, and [TensorFlow Hub](https://www.tensorflow.org/hub), a library and platform for transfer learning. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).
```
# Core numerics and TensorFlow.
import numpy as np
import tensorflow as tf
# Notebook shell magics: install TF Hub and the nightly TFDS build into the runtime.
!pip install tensorflow-hub
!pip install tfds-nightly
import tensorflow_hub as hub
import tensorflow_datasets as tfds
# Report the runtime configuration so results are reproducible.
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
```
## Download the IMDB dataset
The IMDB dataset is available on [imdb reviews](https://www.tensorflow.org/datasets/catalog/imdb_reviews) or on [TensorFlow datasets](https://www.tensorflow.org/datasets). The following code downloads the IMDB dataset to your machine (or the colab runtime):
```
# Split the training set into 60% and 40%, so we'll end up with 15,000 examples
# for training, 10,000 examples for validation and 25,000 examples for testing.
# as_supervised=True makes the dataset yield (text, label) tuples instead of feature dicts.
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews",
split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
```
## Explore the data
Let's take a moment to understand the format of the data. Each example is a sentence representing the movie review and a corresponding label. The sentence is not preprocessed in any way. The label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.
Let's print first 10 examples.
```
# Pull one batch of 10 (text, label) examples from the training pipeline.
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
```
Let's also print the first 10 labels.
```
# Labels for the same 10 examples: 0 = negative review, 1 = positive review.
train_labels_batch
```
## Build the model
The neural network is created by stacking layers—this requires three main architectural decisions:
* How to represent the text?
* How many layers to use in the model?
* How many *hidden units* to use for each layer?
In this example, the input data consists of sentences. The labels to predict are either 0 or 1.
One way to represent the text is to convert sentences into embeddings vectors. We can use a pre-trained text embedding as the first layer, which will have three advantages:
* we don't have to worry about text preprocessing,
* we can benefit from transfer learning,
* the embedding has a fixed size, so it's simpler to process.
For this example we will use a **pre-trained text embedding model** from [TensorFlow Hub](https://www.tensorflow.org/hub) called [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1).
There are three other pre-trained models to test for the sake of this tutorial:
* [google/tf2-preview/gnews-swivel-20dim-with-oov/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1) - same as [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1), but with 2.5% vocabulary converted to OOV buckets. This can help if vocabulary of the task and vocabulary of the model don't fully overlap.
* [google/tf2-preview/nnlm-en-dim50/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1) - A much larger model with ~1M vocabulary size and 50 dimensions.
* [google/tf2-preview/nnlm-en-dim128/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1) - Even larger model with ~1M vocabulary size and 128 dimensions.
Let's first create a Keras layer that uses a TensorFlow Hub model to embed the sentences, and try it out on a couple of input examples. Note that no matter the length of the input text, the output shape of the embeddings is: `(num_examples, embedding_dimension)`.
```
# Pre-trained 20-dim gnews Swivel embedding from TF Hub; trainable=True lets
# the embedding weights be fine-tuned during training.
embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
dtype=tf.string, trainable=True)
# Sanity check: embed the first three example sentences.
hub_layer(train_examples_batch[:3])
```
Let's now build the full model:
```
# Classifier: pre-trained text embedding -> 16-unit ReLU layer -> single logit.
model = tf.keras.Sequential([
    hub_layer,
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1),
])
model.summary()
```
The layers are stacked sequentially to build the classifier:
1. The first layer is a TensorFlow Hub layer. This layer uses a pre-trained Saved Model to map a sentence into its embedding vector. The pre-trained text embedding model that we are using ([google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1)) splits the sentence into tokens, embeds each token and then combines the embedding. The resulting dimensions are: `(num_examples, embedding_dimension)`.
2. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.
3. The last layer is densely connected with a single output node.
Let's compile the model.
### Loss function and optimizer
A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs logits (a single-unit layer with a linear activation), we'll use the `binary_crossentropy` loss function.
This isn't the only choice for a loss function, you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.
Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.
Now, configure the model to use an optimizer and a loss function:
```
# from_logits=True because the final Dense layer emits raw logits (no sigmoid).
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
```
## Train the model
Train the model for 20 epochs in mini-batches of 512 samples. This is 20 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:
```
# Shuffle with a 10k buffer and train in mini-batches of 512 for 20 epochs,
# validating on the held-out split after each epoch.
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=20,
validation_data=validation_data.batch(512),
verbose=1)
```
## Evaluate the model
And let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.
```
# Evaluate on the test split and report each compiled metric (loss, accuracy).
results = model.evaluate(test_data.batch(512), verbose=2)
for metric_name, metric_value in zip(model.metrics_names, results):
    print(f"{metric_name}: {metric_value:.3f}")
```
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.
## Further reading
For a more general way to work with string inputs and for a more detailed analysis of the progress of accuracy and loss during training, see the [Text classification with preprocessed text](./text_classification.ipynb) tutorial.
| github_jupyter |
# TOC
1. [Settings](#Settings)
2. [Get the task list](#Get-the-task-list)
3. [Upload annotations](#Upload-annotations)
4. [Get annotation results](#Get-annotation-results)
5. [Get annotation detail log](#Get-annotation-detail-log)
# Settings
```
import init
import pandas as pd
import json
import requests
# Local development endpoint (no auth).
host = 'http://api:5000'
headers = {}
# Cloud Run API needs Authorization
# NOTE(review): the assignments below silently override the local settings
# above — keep only the pair matching the target environment.
host = 'https://******.a.run.app'
headers = {
'Authorization': 'Bearer <TOKEN>'
}
```
# Get the task list
```
# List all annotation tasks and show the first three as a DataFrame.
res = requests.get(f'{host}/tasks', headers=headers).json()
pd.DataFrame(res)[:3]
```
# Upload annotations
## Card UI
- [Task-dependent schema#card](https://github.com/CyberAgent/fast-annotation-tool/wiki/%E3%82%BF%E3%82%B9%E3%82%AF%E4%BE%9D%E5%AD%98%E3%81%AE%E3%82%B9%E3%82%AD%E3%83%BC%E3%83%9E#card)
<img src="https://user-images.githubusercontent.com/17490886/101377448-2b53fe80-38f5-11eb-8f46-0b154fc60138.png" alt="image" />
```
# Make annotation data
# Each card carries the text to annotate plus arbitrary hidden_data that is
# returned untouched with the results (useful for joins during aggregation).
annotations_data = [
{
"text": f"This is a test{i}.",
"show_ambiguous_button": True,
"hidden_data": {
"desc": "Data for aggregation. It can be a dictionary or a string."
}
} for i in range(100)
]
df_annotation = pd.DataFrame(annotations_data)
df_annotation[:3]
# Post task data
# annotation_type="card" selects the Card UI for this task.
post_data = {
"task_id": "card-demo-20200602",
"annotation_type": "card",
"title": "Card Demo",
"question": "This is card demo",
"description": "This is a card demo, so feel free to annotate it as you wish.",
"annotations_data": annotations_data
}
res = requests.post(f'{host}/tasks', headers=headers, json=post_data).json()
res
```
## Multi-Label UI
- [Task-dependent schema#multilabel](https://github.com/CyberAgent/fast-annotation-tool/wiki/%E3%82%BF%E3%82%B9%E3%82%AF%E4%BE%9D%E5%AD%98%E3%81%AE%E3%82%B9%E3%82%AD%E3%83%BC%E3%83%9E#multilabel)

```
# Make annotation data
annotation_data = [
{
"text": f"This is a test{i}.",
"choices": ["ChoiceA", "ChoiceB", "ChoiceC", "ChoiceD"],
"baseline_text": "Baseline Text",
"hidden_data": {
"desc": "Data for aggregation. It can be a dictionary or a string."
}
}
for i in range(100)
]
df_annotation = pd.DataFrame(annotation_data)
df_annotation[:3]
# Post task data
post_data = {
"task_id": "multilabel-demo-20200602",
"annotation_type": "multi_label",
"title": "Multi-Label Demo",
"question": "This is multi-label demo",
"description": "This is a multi-label demo, so feel free to annotate it as you wish.",
"annotations_data": annotation_data
}
res = requests.post(f'{host}/tasks', headers=headers, json=post_data).json()
res
```
# Get annotation results
```
%%time
task_id = "card-demo-20200602"
# Fetch the task and all of its annotations in one call.
res = requests.get(f'{host}/tasks/{task_id}', headers=headers).json()
# Task Info
res['task']
# Annotation data and annotator responses
df_res = pd.DataFrame(res['annotations'])
# Mask personal information before displaying.
df_res['name'] = '****'
df_res['email'] = '****'
# Show three rows that actually have annotator responses (result_data set).
df_res[~df_res.result_data.isna()][:3]
```
# Get annotation detail log
```
task_id = "card-demo-20200602"
# Fetch the per-annotation event log for the task.
res = requests.get(f'{host}/tasks/{task_id}/logs', headers=headers).json()
df_res = pd.DataFrame(res['logs'])
# Mask personal information before displaying.
df_res['name'] = '****'
df_res['email'] = '****'
df_res.sample(5)
```
| github_jupyter |
# Real-world use-cases at scale!
# Imports
Let's start with imports.
```
import sys
sys.path.append("gpu_bdb_runner.egg")
import gpu_bdb_runner as gpubdb
import os
import inspect
from highlight_code import print_code
config_options = {}
config_options['JOIN_PARTITION_SIZE_THRESHOLD'] = os.environ.get("JOIN_PARTITION_SIZE_THRESHOLD", 300000000)
config_options['MAX_DATA_LOAD_CONCAT_CACHE_BYTE_SIZE'] = os.environ.get("MAX_DATA_LOAD_CONCAT_CACHE_BYTE_SIZE", 400000000)
config_options['BLAZING_DEVICE_MEM_CONSUMPTION_THRESHOLD'] = os.environ.get("BLAZING_DEVICE_MEM_CONSUMPTION_THRESHOLD", 0.6)
config_options['BLAZ_HOST_MEM_CONSUMPTION_THRESHOLD'] = os.environ.get("BLAZ_HOST_MEM_CONSUMPTION_THRESHOLD", 0.6)
config_options['MAX_KERNEL_RUN_THREADS'] = os.environ.get("MAX_KERNEL_RUN_THREADS", 3)
config_options['TABLE_SCAN_KERNEL_NUM_THREADS'] = os.environ.get("TABLE_SCAN_KERNEL_NUM_THREADS", 1)
config_options['MAX_NUM_ORDER_BY_PARTITIONS_PER_NODE'] = os.environ.get("MAX_NUM_ORDER_BY_PARTITIONS_PER_NODE", 20)
config_options['ORDER_BY_SAMPLES_RATIO'] = os.environ.get("ORDER_BY_SAMPLES_RATIO", 0.0002)
config_options['NUM_BYTES_PER_ORDER_BY_PARTITION'] = os.environ.get("NUM_BYTES_PER_ORDER_BY_PARTITION", 400000000)
config_options['MAX_ORDER_BY_SAMPLES_PER_NODE'] = os.environ.get("MAX_ORDER_BY_SAMPLES_PER_NODE", 10000)
config_options['MAX_SEND_MESSAGE_THREADS'] = os.environ.get("MAX_SEND_MESSAGE_THREADS", 20)
config_options['MEMORY_MONITOR_PERIOD'] = os.environ.get("MEMORY_MONITOR_PERIOD", 50)
config_options['TRANSPORT_BUFFER_BYTE_SIZE'] = os.environ.get("TRANSPORT_BUFFER_BYTE_SIZE", 10485760) # 10 MBs
config_options['TRANSPORT_POOL_NUM_BUFFERS'] = os.environ.get("TRANSPORT_POOL_NUM_BUFFERS", 100)
config_options['BLAZING_LOGGING_DIRECTORY'] = os.environ.get("BSQL_BLAZING_LOGGING_DIRECTORY", 'blazing_log')
config_options['BLAZING_CACHE_DIRECTORY'] = os.environ.get("BSQL_BLAZING_CACHE_DIRECTORY", '/tmp/')
config_options['LOGGING_LEVEL'] = os.environ.get("LOGGING_LEVEL", "trace")
config_options['MAX_JOIN_SCATTER_MEM_OVERHEAD'] = os.environ.get("MAX_JOIN_SCATTER_MEM_OVERHEAD", 500000000)
config_options['NETWORK_INTERFACE'] = os.environ.get("NETWORK_INTERFACE", 'ens5')
```
# Start the runner
```
# Initialise the benchmark runner against the SF1 (scale factor 1) dataset on
# S3, forwarding all engine config options defined above.
runner = gpubdb.GPU_BDB_Runner(
scale='SF1'
, client_type='cluster'
, bucket='bsql'
, data_dir='s3://bsql/data/tpcx_bb/sf1/'
, output_dir='tpcx-bb-runner/results'
, **config_options
)
```
# Use cases for review
## Use case 2
**Question:** Find the top 30 products that are mostly viewed together with a given product in the online store. Note that the order of products viewed does not matter, and "viewed together" relates to a web-clickstream click session of a known user with a session timeout of 60 min. If the duration between two clicks of a user is greater than the session timeout, a new session begins.
Let's peek inside the code:
```
# Grab the query-02 source as lines and display the main body with highlighting.
q2_code = inspect.getsource(gpubdb.queries.gpu_bdb_queries.gpu_bdb_query_02).split('\n')
print_code('\n'.join(q2_code[92:-18]))
```
The `get_distinct_sessions` is defined as follows:
```
print_code('\n'.join(q2_code[73:77]))  # slice containing get_distinct_sessions
```
It calls the `get_sessions`
```
print_code('\n'.join(q2_code[64:72]))  # slice containing get_sessions
```
Let's have a look at the `get_session_id` method
```
print_code('\n'.join(q2_code[34:63]))  # slice containing get_session_id
```
Now that we know how this works - let's run the query
```
runner.run_query(2, repeat=1, validate_results=False)  # use case 2: single run, skip result validation
```
## Use case 23
**Question:** This Query contains multiple, related iterations:
1. Iteration 1: Calculate the coefficient of variation and mean of every item and warehouse of the given and the consecutive month.
2. Iteration 2: Find items that had a coefficient of variation of 1.3 or larger in the given and the consecutive month
```
# Show the query-23 source, then execute it once without result validation.
q23_code = inspect.getsource(gpubdb.queries.gpu_bdb_queries.gpu_bdb_query_23).split('\n')
print_code('\n'.join(q23_code[23:-12]))
runner.run_query(23, repeat=1, validate_results=False)
```
# Remaining usecases
## Use case 1
**Question:** Find top ***100*** products that are sold together frequently in given stores. Only products in certain categories ***(categories 2 and 3)*** sold in specific stores are considered, and "sold together frequently" means at least ***50*** customers bought these products together in a transaction.
In ANSI-SQL code the solution would look somewhat similar to the one below.
```
runner.run_query(1, repeat=1, validate_results=False)  # use case 1: single run, skip result validation
```
## Use case 3
**Question:** For a given product get a top 30 list sorted by number of views in descending order of the last 5 products that are mostly viewed before the product was purchased online. For the viewed products, consider only products in certain item categories and viewed within 10 days before the purchase date.
```
runner.run_query(3, repeat=1, validate_results=False)  # use case 3: single run, skip result validation
```
## Use case 4
**Question:** Web_clickstream shopping cart abandonment analysis: For users who added products in their shopping carts but did not check out in the online store during their session, find the average number of pages they visited during their sessions. A "session" relates to a click_session of a known user with a session time-out of 60 min. If the duration between two clicks of a user is greater than the session time-out, a new session begins.
```
runner.run_query(4, repeat=1, validate_results=False)  # use case 4: single run, skip result validation
```
## Use case 5
**Question**: Build a model using logistic regression for a visitor to an online store: based on existing users online activities (interest in items of different categories) and demographics. This model will be used to predict if the visitor is interested in a given item category. Output the precision, accuracy and confusion matrix of model. *Note:* no need to actually classify existing users, as it will be later used to predict interests of unknown visitors.
```
runner.run_query(5, repeat=1, validate_results=False)  # use case 5: single run, skip result validation
```
## Use case 6
Identifies customers shifting their purchase habit from store to web sales. Find customers who spend proportionally more money, in the second year following a given year, in the web_sales channel than in the store sales channel. Report customer details: first name, last name, their country of origin, login name and email address, and identify whether they are preferred customers, for the top 100 customers with the highest increase in their second-year web purchase ratio.
```
runner.run_query(6, repeat=1, validate_results=False)  # use case 6: single run, skip result validation
```
## Use case 7
**Question:** List top 10 states in descending order with at least 10 customers who during a given month bought products with the price tag at least 20% higher than the average price of products in the same category.
```
runner.run_query(7, repeat=1, validate_results=False)  # use case 7: single run, skip result validation
```
## Use case 8
**Question:** For online sales, compare the total sales monetary amount in which customers checked online reviews before making the purchase and that of sales in which customers did not read reviews. Consider only online sales for a specific category in a given year.
```
runner.run_query(8, repeat=1, validate_results=False)  # use case 8: single run, skip result validation
```
## Use case 9
**Question:** Aggregate total amount of sold items over different given types of combinations of customers based on selected groups of marital status, education status, sales price and different combinations of state and sales/profit.
```
runner.run_query(9, repeat=1, validate_results=False)  # use case 9: single run, skip result validation
```
## Use case 10
**Question:** For all products, extract sentences from its product reviews that contain positive or negative sentiment and display for each item the sentiment polarity of the extracted sentences (POS OR NEG) and the sentence and word in sentence leading to this classification.
```
runner.run_query(10, repeat=1, validate_results=False, additional_resources_path='s3://bsql/data/tpcx_bb/additional_resources')  # use case 10: single run; also pulls extra resources from S3
```
## Use case 11
**Question:** For a given product, measure the correlation of sentiments, including the number of reviews and average review ratings, on product monthly revenues within a given time frame.
```
runner.run_query(11, repeat=1, validate_results=False)  # use case 11: single run, skip result validation
```
## Use case 12
**Question:** Find all customers who viewed items of a given category on the web in a given month and year that was followed by an instore purchase of an item from the same category in the three consecutive months.
```
runner.run_query(12, repeat=1, validate_results=False)  # use case 12: single run, skip result validation
```
## Use case 13
**Question:** Display customers with both store and web sales in consecutive years for whom the increase in web sales exceeds the increase in store sales for a specified year.
```
runner.run_query(13, repeat=1, validate_results=False)  # use case 13: single run, skip result validation
```
## Use case 14
**Question:** What is the ratio between the number of items sold over the internet in the morning (7 to 8am) to the number of items sold in the evening (7 to 8pm) of customers with a specified number of dependents. Consider only websites with a high amount of content.
```
runner.run_query(14, repeat=1, validate_results=False)  # use case 14: single run, skip result validation
```
## Use case 15
**Question:** Find the categories with flat or declining sales for in store purchases during a given year for a given store.
```
runner.run_query(15, repeat=1, validate_results=False)  # use case 15: single run, skip result validation
```
## Use case 16
**Question:** Compute the impact of an item price change on the store sales by computing the total sales for items in a 30 day period before and after the price change. Group the items by location of warehouse where they were delivered from.
```
runner.run_query(16, repeat=1, validate_results=False)  # use case 16: single run, skip result validation
```
## Use case 17
**Question:** Find the ratio of items sold with and without promotions in a given month and year. Only items in certain categories sold to customers living in a specific time zone are considered.
```
runner.run_query(17, repeat=1, validate_results=False)  # use case 17: single run, skip result validation
```
## Use case 18
**Question:** Identify the stores with flat or declining sales in 4 consecutive months, check if there are any negative reviews regarding these stores available online.
```
runner.run_query(18, repeat=1, validate_results=False, additional_resources_path='s3://bsql/data/tpcx_bb/additional_resources')  # use case 18: single run; also pulls extra resources from S3
```
## Use case 19
**Question:** Retrieve the items with the highest number of returns where the number of returns was approximately equivalent across all store and web channels (within a tolerance of +/ 10%), within the week ending given dates. Analyse the online reviews for these items to see if there are any negative reviews.
```
runner.run_query(19, repeat=1, validate_results=False, additional_resources_path='s3://bsql/data/tpcx_bb/additional_resources')  # use case 19: single run; also pulls extra resources from S3
```
## Use case 20
**Question:** Customer segmentation for return analysis: Customers are separated along the following dimensions:
1. return frequency,
2. return order ratio (total number of orders partially or fully returned versus the total number of orders),
3. return item ratio (total number of items returned versus the number of items purchased),
4. return amount ratio (total monetary amount of items returned versus the amount purchased),
5. return order ratio.
Consider the store returns during a given year for the computation.
```
runner.run_query(20, repeat=1, validate_results=False)  # use case 20: single run, skip result validation
```
## Use case 21
**Question:** Get all items that were sold in stores in a given month and year and which were returned in the next 6 months and repurchased by the returning customer afterwards through the web sales channel in the following three years. For those items, compute the total quantity sold through the store, the quantity returned and the quantity purchased through the web. Group this information by item and store.
```
runner.run_query(21, repeat=1, validate_results=False)  # use case 21: single run, skip result validation
```
## Use case 22
**Question:** For all items whose price was changed on a given date, compute the percentage change in inventory between the 30 day period BEFORE the price change and the 30 day period AFTER the change. Group this information by warehouse.
```
runner.run_query(22, repeat=1, validate_results=False)  # use case 22: single run, skip result validation
```
## Use case 24
**Question:** For a given product, measure the effect of competitors' prices on the product's in-store and online sales. Compute the cross-price elasticity of demand for a given product.
```
runner.run_query(24, repeat=1, validate_results=False)  # use case 24: single run, skip result validation
```
## Use case 25
**Question:** Customer segmentation analysis: Customers are separated along the following key shopping dimensions:
1. recency of last visit,
2. frequency of visits and monetary amount.
Use the store and online purchase data during a given year for the computation. After the model of separation is built, report to which "group" each analysed customer was assigned.
```
runner.run_query(25, repeat=1, validate_results=False)  # use case 25: single run, skip result validation
```
## Use case 26
**Question:** Cluster customers into book buddies/club groups based on their in-store book purchasing histories. After the model of separation is built, report to which "group" each analysed customer was assigned.
```
runner.run_query(26, repeat=1, validate_results=False)  # use case 26: single run, skip result validation
```
## Use case 27
**Question:** For a given product, find "competitor" company names in the product reviews. Display review id, product id, "competitor’s" company name and the related sentence from the online review
```
# NOTE(review): use case 27 is deliberately left disabled here; the reason is
# not recorded — confirm before re-enabling.
# runner.run_query(27, repeat=1, validate_results=False)
```
## Use case 28
**Question:** Build a text classifier for online review sentiment classification (Positive, Negative, Neutral), using 90% of available reviews for training and the remaining 10% for testing. Display the classifier accuracy on the testing data and the classification result for the 10% testing data: \<reviewSK\>, \<originalRating\>, \<classificationResult\>.
```
runner.run_query(28, repeat=1, validate_results=False)  # use case 28: single run, skip result validation
```
## Use case 29
**Question:** Perform category affinity analysis for products purchased together online.
```
runner.run_query(29, repeat=1, validate_results=False)  # use case 29: single run, skip result validation
```
## Use case 30
**Question:** Perform category affinity analysis for products viewed together online. Note that the order of products viewed does not matter, and "viewed together" relates to a click session of a user with a session timeout of 60 min. If the duration between two clicks of a user is greater than the session timeout, a new session begins.
```
runner.run_query(30, repeat=1, validate_results=False)  # use case 30: single run, skip result validation
```
| github_jupyter |
```
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.linear_model import LogisticRegression
import category_encoders as ce
from imblearn.under_sampling import RandomUnderSampler
from catboost import CatBoostClassifier
# Gradient Boosting
import lightgbm as lgb
import xgboost as xgb
import category_encoders as ce
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from scipy.cluster.vq import kmeans2, whiten
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
from catboost import CatBoostRegressor
%matplotlib inline
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# num_rows=None loads every row of each CSV read below.
num_rows = None
# Tiny epsilon constant; its use is not visible in this cell.
EPS = 1e-100
# NOTE(review): absolute path here vs. the relative '../data/...' paths used
# in application_train_test below — confirm which location is intended.
train = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip')
y = train['TARGET']
n_train = train.shape[0]
def descretize(x, n):
    """Bin the values of *x* into *n* quantile buckets and return string labels.

    Converted from a lambda assigned to a name (discouraged by PEP 8 E731) to a
    proper ``def``; the original (misspelled) name is kept so existing callers
    keep working. ``duplicates='drop'`` prevents ``pd.qcut`` from raising when
    quantile edges coincide, so fewer than *n* distinct labels may come back.
    """
    return [str(interval) for interval in pd.qcut(x, n, duplicates='drop')]
def binary_encoder(df, n_train):
    """Binary-encode the object-dtype columns of *df* and return the new columns.

    The encoder is fit on rows ``0:n_train`` only, then applied to the whole
    frame; only the columns the encoder created are returned.

    NOTE(review): assumes *df* has a 'TARGET' column and that the first
    *n_train* rows are the training portion (presumably to avoid fitting on
    test rows) — confirm against the caller.
    """
    original_columns = list(df.columns)
    # Object dtype is treated as the marker for categorical features.
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    enc = ce.BinaryEncoder(impute_missing=True, cols=categorical_columns).fit(df[0:n_train], df[0:n_train]['TARGET'])
    df = enc.transform(df)
    # Keep only the columns the encoder added.
    new_columns = [c for c in df.columns if c not in original_columns]
    return df[new_columns]
def application_train_test(num_rows=num_rows, nan_as_category=False):
    """Load the Home Credit application train+test tables, stack them, and engineer features.

    Parameters
    ----------
    num_rows : int or None
        Row cap passed to ``pd.read_csv`` (None = read everything).
    nan_as_category : bool
        NOTE(review): accepted but never used anywhere in this function.

    Returns
    -------
    DataFrame with train rows first, then test rows, carrying the raw columns
    plus the engineered NEW_* ratio/aggregate columns, household-derived
    columns, and kmeans cluster labels x_0..x_10 of log1p(credit/annuity).
    """
    # Read data and merge
    df = pd.read_csv('../data/application_train.csv.zip', nrows=num_rows)
    n_train = df.shape[0]
    test_df = pd.read_csv('../data/application_test.csv.zip', nrows=num_rows)
    print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
    # NOTE(review): DataFrame.append is removed in pandas 2.x; pd.concat is the replacement.
    df = df.append(test_df).reset_index()
    # Sentinel placeholder values -> NaN so they don't pollute the ratios below.
    df['CODE_GENDER'].replace('XNA', np.nan, inplace=True)
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    df['NAME_FAMILY_STATUS'].replace('Unknown', np.nan, inplace=True)
    df['ORGANIZATION_TYPE'].replace('XNA', np.nan, inplace=True)
    # Optional: Remove 4 applications with XNA CODE_GENDER (train set)
    # NOTE(review): no-op at this point — the replace above already turned 'XNA' into NaN.
    df = df[df['CODE_GENDER'] != 'XNA']
    # Column groups: document flags vs. other FLAG_* indicator columns.
    docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
    live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
    # NaN values for DAYS_EMPLOYED: 365.243 -> nan
    # NOTE(review): duplicate of the identical replace a few lines above.
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
    # Median income per organization type, mapped back onto each row below.
    inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
    # --- Ratio and aggregate features over the monetary / score columns ---
    df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
    df['NEW_AMT_INCOME_TOTAL_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
    df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
    df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)
    df['NEW_DOC_IND_STD'] = df[docs].std(axis=1)
    df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)
    df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
    df['NEW_LIVE_IND_STD'] = df[live].std(axis=1)
    df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)
    df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
    df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
    df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
    df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
    df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
    df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
    df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
    df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())
    df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
    df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
    df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
    df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
    df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
    # df['children_ratio'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
    # df['NEW_EXT_SOURCES_MEDIAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].median(axis=1)
    # df['NEW_DOC_IND_SKEW'] = df[docs].skew(axis=1)
    # df['NEW_LIVE_IND_SKEW'] = df[live].skew(axis=1)
    # df['ind_0'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['DAYS_EMPLOYED'].dropna().median()).mean()
    # df['ind_1'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['DAYS_EMPLOYED'].dropna().median()).median()
    # df['ind_2'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['DAYS_BIRTH'].dropna().median()).mean()
    # df['ind_3'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['DAYS_BIRTH'].dropna().median()).median()
    # df['ind_4'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_INCOME_TOTAL'].dropna().median()).mean()
    # df['ind_5'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_INCOME_TOTAL'].dropna().median()).median()
    # df['ind_6'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_CREDIT'].dropna().median()).mean()
    # df['ind_7'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_CREDIT'].dropna().median()).median()
    # df['ind_8'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_ANNUITY'].dropna().median()).mean()
    # df['ind_9'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_ANNUITY'].dropna().median()).median()
    # df['ind_10'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_INCOME_TOTAL'].dropna().median()).mean()
    # df['ind_11'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(
    #     df['AMT_INCOME_TOTAL'].dropna().median()).median()
    # AGGREGATION_RECIPIES = [
    #     (['CODE_GENDER', 'NAME_EDUCATION_TYPE'], [('AMT_ANNUITY', 'max'),
    #                                               ('AMT_CREDIT', 'max'),
    #                                               ('EXT_SOURCE_1', 'mean'),
    #                                               ('EXT_SOURCE_2', 'mean'),
    #                                               ('OWN_CAR_AGE', 'max'),
    #                                               ('OWN_CAR_AGE', 'sum')]),
    #     (['CODE_GENDER', 'ORGANIZATION_TYPE'], [('AMT_ANNUITY', 'mean'),
    #                                             ('AMT_INCOME_TOTAL', 'mean'),
    #                                             ('DAYS_REGISTRATION', 'mean'),
    #                                             ('EXT_SOURCE_1', 'mean'),
    #                                             ('NEW_CREDIT_TO_ANNUITY_RATIO', 'mean')]),
    #     (['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], [('AMT_ANNUITY', 'mean'),
    #                                                  ('CNT_CHILDREN', 'mean'),
    #                                                  ('DAYS_ID_PUBLISH', 'mean')]),
    #     (['CODE_GENDER', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('EXT_SOURCE_1', 'mean'),
    #                                                                                            ('EXT_SOURCE_2',
    #                                                                                             'mean')]),
    #     (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], [('AMT_CREDIT', 'mean'),
    #                                                   ('AMT_REQ_CREDIT_BUREAU_YEAR', 'mean'),
    #                                                   ('APARTMENTS_AVG', 'mean'),
    #                                                   ('BASEMENTAREA_AVG', 'mean'),
    #                                                   ('EXT_SOURCE_1', 'mean'),
    #                                                   ('EXT_SOURCE_2', 'mean'),
    #                                                   ('EXT_SOURCE_3', 'mean'),
    #                                                   ('NONLIVINGAREA_AVG', 'mean'),
    #                                                   ('OWN_CAR_AGE', 'mean')]),
    #     (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('ELEVATORS_AVG', 'mean'),
    #                                                                             ('EXT_SOURCE_1', 'mean')]),
    #     (['OCCUPATION_TYPE'], [('AMT_ANNUITY', 'mean'),
    #                            ('CNT_CHILDREN', 'mean'),
    #                            ('CNT_FAM_MEMBERS', 'mean'),
    #                            ('DAYS_BIRTH', 'mean'),
    #                            ('DAYS_EMPLOYED', 'mean'),
    #                            ('NEW_CREDIT_TO_ANNUITY_RATIO', 'median'),
    #                            ('DAYS_REGISTRATION', 'mean'),
    #                            ('EXT_SOURCE_1', 'mean'),
    #                            ('EXT_SOURCE_2', 'mean'),
    #                            ('EXT_SOURCE_3', 'mean')]),
    # ]
    # for groupby_cols, specs in AGGREGATION_RECIPIES:
    #     group_object = df.groupby(groupby_cols)
    #     for select, agg in specs:
    #         groupby_aggregate_name = '{}_{}_{}'.format('_'.join(groupby_cols), agg, select)
    #         df = df.merge(group_object[select]
    #                       .agg(agg)
    #                       .reset_index()
    #                       .rename(index=str,
    #                               columns={select: groupby_aggregate_name})
    #                       [groupby_cols + [groupby_aggregate_name]],
    #                       on=groupby_cols,
    #                       how='left')
    # ['DAYS_EMPLOYED', 'CNT_FAM_MEMBERS', 'CNT_CHILDREN', 'credit_per_person', 'cnt_non_child']
    # --- Household / age-derived features ---
    # Flags on day counts (DAYS_* are negative offsets from application date).
    df['retirement_age'] = (df['DAYS_BIRTH'] > -14000).astype(int)
    df['long_employment'] = (df['DAYS_EMPLOYED'] > -2000).astype(int)
    df['cnt_non_child'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
    df['child_to_non_child_ratio'] = df['CNT_CHILDREN'] / df['cnt_non_child']
    df['income_per_non_child'] = df['AMT_INCOME_TOTAL'] / df['cnt_non_child']
    df['credit_per_person'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
    df['credit_per_child'] = df['AMT_CREDIT'] / (1 + df['CNT_CHILDREN'])
    df['credit_per_non_child'] = df['AMT_CREDIT'] / df['cnt_non_child']
    # NOTE(review): the six assignments below are an exact repeat of the six above (harmless no-op).
    df['cnt_non_child'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
    df['child_to_non_child_ratio'] = df['CNT_CHILDREN'] / df['cnt_non_child']
    df['income_per_non_child'] = df['AMT_INCOME_TOTAL'] / df['cnt_non_child']
    df['credit_per_person'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
    df['credit_per_child'] = df['AMT_CREDIT'] / (1 + df['CNT_CHILDREN'])
    df['credit_per_non_child'] = df['AMT_CREDIT'] / df['cnt_non_child']
    # df['p_0'] = descretize(df['credit_per_non_child'].values, 2 ** 5)
    # df['p_1'] = descretize(df['credit_per_person'].values, 2 ** 5)
    # df['p_2'] = descretize(df['credit_per_child'].values, 2 ** 5)
    # df['p_3'] = descretize(df['retirement_age'].values, 2 ** 5)
    # df['p_4'] = descretize(df['income_per_non_child'].values, 2 ** 5)
    # df['p_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 5)
    # df['p_6'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 5)
    # df['p_7'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 6)
    # df['p_8'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 7)
    # df['pe_0'] = descretize(df['credit_per_non_child'].values, 2 ** 6)
    # df['pe_1'] = descretize(df['credit_per_person'].values, 2 ** 6)
    # df['pe_2'] = descretize(df['credit_per_child'].values, 2 ** 6)
    # df['pe_3'] = descretize(df['retirement_age'].values, 2 ** 6)
    # df['pe_4'] = descretize(df['income_per_non_child'].values, 2 ** 6)
    # df['pe_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 6)
    # --- kmeans cluster labels of log1p(credit/annuity) at increasing granularity ---
    # inf/NaN are mapped to sentinel 999 before clustering.
    # NOTE(review): kmeans2 is run without a fixed seed here, so x_0..x_10 are
    # not reproducible across runs — consider passing seed= for determinism.
    c = df['NEW_CREDIT_TO_ANNUITY_RATIO'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    a, b = kmeans2(np.log1p(c), 2, iter=333)
    df['x_0'] = b
    a, b = kmeans2(np.log1p(c), 4, iter=333)
    df['x_1'] = b
    a, b = kmeans2(np.log1p(c), 8, iter=333)
    df['x_2'] = b
    a, b = kmeans2(np.log1p(c), 16, iter=333)
    df['x_3'] = b
    a, b = kmeans2(np.log1p(c), 32, iter=333)
    df['x_4'] = b
    a, b = kmeans2(np.log1p(c), 64, iter=333)
    df['x_5'] = b
    a, b = kmeans2(np.log1p(c), 128, iter=333)
    df['x_6'] = b
    a, b = kmeans2(np.log1p(c), 150, iter=333)
    df['x_7'] = b
    a, b = kmeans2(np.log1p(c), 256, iter=333)
    df['x_8'] = b
    a, b = kmeans2(np.log1p(c), 512, iter=333)
    df['x_9'] = b
    a, b = kmeans2(np.log1p(c), 1024, iter=333)
    df['x_10'] = b
    # c = df['EXT_SOURCE_1'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    # a, b = kmeans2(np.log1p(c), 2, iter=333)
    # df['ex1_0'] = b
    # a, b = kmeans2(np.log1p(c), 4, iter=333)
    # df['ex1_1'] = b
    # a, b = kmeans2(np.log1p(c), 8, iter=333)
    # df['ex1_2'] = b
    # a, b = kmeans2(np.log1p(c), 16, iter=333)
    # df['ex1_3'] = b
    # a, b = kmeans2(np.log1p(c), 32, iter=333)
    # df['ex1_4'] = b
    # a, b = kmeans2(np.log1p(c), 64, iter=333)
    # df['ex1_5'] = b
    # a, b = kmeans2(np.log1p(c), 128, iter=333)
    # df['ex1_6'] = b
    # a, b = kmeans2(np.log1p(c), 256, iter=333)
    # df['ex1_7'] = b
    # c = df['EXT_SOURCE_2'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    # a, b = kmeans2(np.log1p(c), 2, iter=333)
    # df['ex2_0'] = b
    # a, b = kmeans2(np.log1p(c), 4, iter=333)
    # df['ex2_1'] = b
    # a, b = kmeans2(np.log1p(c), 8, iter=333)
    # df['ex2_2'] = b
    # a, b = kmeans2(np.log1p(c), 16, iter=333)
    # df['ex2_3'] = b
    # a, b = kmeans2(np.log1p(c), 32, iter=333)
    # df['ex2_4'] = b
    # a, b = kmeans2(np.log1p(c), 64, iter=333)
    # df['ex2_5'] = b
    # a, b = kmeans2(np.log1p(c), 128, iter=333)
    # df['ex2_6'] = b
    # a, b = kmeans2(np.log1p(c), 256, iter=333)
    # df['ex2_7'] = b
    # c = df['EXT_SOURCE_3'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    # a, b = kmeans2(np.log1p(c), 2, iter=333)
    # df['ex3_0'] = b
    # a, b = kmeans2(np.log1p(c), 4, iter=333)
    # df['ex3_1'] = b
    # a, b = kmeans2(np.log1p(c), 8, iter=333)
    # df['ex3_2'] = b
    # a, b = kmeans2(np.log1p(c), 16, iter=333)
    # df['ex3_3'] = b
    # a, b = kmeans2(np.log1p(c), 32, iter=333)
    # df['ex3_4'] = b
    # a, b = kmeans2(np.log1p(c), 64, iter=333)
    # df['ex3_5'] = b
    # a, b = kmeans2(np.log1p(c), 128, iter=333)
    # df['ex3_6'] = b
    # a, b = kmeans2(np.log1p(c), 256, iter=333)
    # df['ex3_7'] = b
    # df['ex_1_0'] = descretize(df['EXT_SOURCE_1'].values, 2 ** 6)
    # df['ex_2_0'] = descretize(df['EXT_SOURCE_2'].values, 2 ** 6)
    # df['ex_3_0'] = descretize(df['EXT_SOURCE_3'].values, 2 ** 6)
    # df['ex_1_1'] = descretize(df['EXT_SOURCE_1'].values, 2 ** 4)
    # df['ex_2_1'] = descretize(df['EXT_SOURCE_2'].values, 2 ** 4)
    # df['ex_3_1'] = descretize(df['EXT_SOURCE_3'].values, 2 ** 4)
    # df['ex_1_2'] = descretize(df['EXT_SOURCE_1'].values, 2 ** 5)
    # df['ex_2_2'] = descretize(df['EXT_SOURCE_2'].values, 2 ** 5)
    # df['ex_3_2'] = descretize(df['EXT_SOURCE_3'].values, 2 ** 5)
    # df['ex_1_3'] = descretize(df['EXT_SOURCE_1'].values, 2 ** 3)
    # df['ex_2_4'] = descretize(df['EXT_SOURCE_2'].values, 2 ** 3)
    # df['ex_3_5'] = descretize(df['EXT_SOURCE_3'].values, 2 ** 3)
    # c = df['NEW_EXT_SOURCES_MEAN'].replace([np.inf, -np.inf], np.nan).fillna(999).values
    # a, b = kmeans2(np.log1p(c), 2, iter=333)
    # df['ex_mean_0'] = b
    # a, b = kmeans2(np.log1p(c), 4, iter=333)
    # df['ex_mean_1'] = b
    # a, b = kmeans2(np.log1p(c), 8, iter=333)
    # df['ex_mean_2'] = b
    # a, b = kmeans2(np.log1p(c), 16, iter=333)
    # df['ex_mean_3'] = b
    # a, b = kmeans2(np.log1p(c), 32, iter=333)
    # df['ex_mean_4'] = b
    # a, b = kmeans2(np.log1p(c), 64, iter=333)
    # df['ex_mean_5'] = b
    # df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
    # df['ex1/ex2'] = df['EXT_SOURCE_1'] / df['EXT_SOURCE_2']
    # df['ex1/ex3'] = df['EXT_SOURCE_1'] / df['EXT_SOURCE_3']
    # df['ex2/ex3'] = df['EXT_SOURCE_3'] / df['EXT_SOURCE_3']
    # df['ex1*ex2'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2']
    # df['ex1*ex3'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_3']
    # df['ex2*ex3'] = df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
    # df['cred*ex1'] = df['AMT_CREDIT'] * df['EXT_SOURCE_1']
    # df['cred*ex2'] = df['AMT_CREDIT'] * df['EXT_SOURCE_2']
    # df['cred*ex3'] = df['AMT_CREDIT'] * df['EXT_SOURCE_3']
    # df['cred/ex1'] = df['AMT_CREDIT'] / df['EXT_SOURCE_1']
    # df['cred/ex2'] = df['AMT_CREDIT'] / df['EXT_SOURCE_2']
    # df['cred/ex3'] = df['AMT_CREDIT'] / df['EXT_SOURCE_3']
    # df['cred*ex123'] = df['AMT_CREDIT'] * df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
    # del df['EXT_SOURCE_1']
    # del df['EXT_SOURCE_2']
    # del df['EXT_SOURCE_3']
    # del df['NEW_EXT_SOURCES_MEAN']
    # Categorical features with Binary encode (0 or 1; two categories)
    for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
    del test_df
    gc.collect()  # NOTE(review): gc is not imported in the visible imports — presumably imported in an earlier cell; verify
    return df
# Build the engineered application table (train rows first, then test rows).
df = application_train_test(num_rows=num_rows, nan_as_category=False)
df.head()
# Hand-picked base features: raw amounts, derived ratios, the kmeans cluster
# ids x_0..x_10, the two day-count flags and the three external scores.
selected_features = ['AMT_ANNUITY', 'AMT_CREDIT', 'AMT_INCOME_TOTAL', 'NEW_CREDIT_TO_ANNUITY_RATIO', 'NEW_CREDIT_TO_GOODS_RATIO', 'NEW_CREDIT_TO_INCOME_RATIO'] + ['x_' + str(x) for x in range(11)] + \
    ['retirement_age', 'long_employment'] + ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']
# object-dtype columns of the raw train table are treated as categoricals
categorical_columns = [col for col in train.columns if train[col].dtype == 'object']
numerical_columns = [col for col in df.columns if df[col].dtype != 'object']
new_df = df.copy()
df = new_df  # NOTE(review): df and new_df alias the same copy from here on
encoder = preprocessing.LabelEncoder()
# Integer-encode each categorical column in place (a fresh fit per column).
for f in categorical_columns:
    if df[f].dtype == 'object':
        df[f] = encoder.fit_transform(df[f].apply(str).values)
categorical_columns  # notebook cell echo
gc.collect()
# Reload the raw (un-engineered) train/test tables and stack them; this copy
# (new_df) is used below for the group-mean encodings.
train = pd.read_csv('../data/application_train.csv.zip', nrows=num_rows)
n_train = train.shape[0]  # refresh the train/test split point
test = pd.read_csv('../data/application_test.csv.zip', nrows=num_rows)
new_df = pd.concat([train, test], axis=0)
gc.collect()
new_df.shape
new_df[categorical_columns].head()
encoder = preprocessing.LabelEncoder()
# Same per-column label encoding as for df above, applied to the raw stack.
for f in categorical_columns:
    if new_df[f].dtype == 'object':
        new_df[f] = encoder.fit_transform(new_df[f].apply(str).values)
# Externally produced list of already-selected features.
new_features = pd.read_csv('selected_features.csv', header=0, index_col=None)
new_features.head()
# Keep only the hand-picked features that the external selection did not already cover.
my_features = [f for f in selected_features if f not in new_features.columns]
my_features
new_df[categorical_columns][0:n_train].shape
new_df[categorical_columns][n_train:].head()
# ---- Teammate feature files (pre-computed CSVs, assumed row-aligned with the
# stacked train+test frame — TODO confirm alignment). Id columns are dropped so
# only feature columns remain. ----
suresh_august16 = pd.read_csv('../data/SureshFeaturesAug16.csv', header=0, index_col=None)
suresh_august16.head()
del suresh_august16['SK_ID_CURR']
goran_features = pd.read_csv('../goran-data/goranm_feats_v3.csv', header=0, index_col=None)
goran_features.head()
del goran_features['SK_ID_CURR']
del goran_features['IS_TRAIN']
goran_features_19_8 = pd.read_csv('../data/goranm_feats_19_08.csv', header=0, index_col=None)
goran_features_19_8.head()
del goran_features_19_8['SK_ID_CURR']
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23+; modern code uses `import joblib`.
from sklearn.externals import joblib
prevs_df = joblib.load('../data/prev_application_solution3_v2')  # pickled previous-applications feature frame
prevs_df.head()
suresh_august16_2 = pd.read_csv('../data/SureshFeaturesAug16_2.csv', header=0, index_col=None)
suresh_august15 = pd.read_csv('../data/SureshFeaturesAug15.csv', header=0, index_col=None)
suresh_august16 = pd.read_csv('../data/SureshFeaturesAug16.csv', header=0, index_col=None)  # re-read; overwrites the frame loaded above
suresh_august19 = pd.read_csv('../data/suresh_features_Aug19th.csv', header=0, index_col=None)
suresh_august19_2 = pd.read_csv('../data/SureshFeatures_19_2th.csv', header=0, index_col=None)
suresh_august20 = pd.read_csv('../data/SureshFeatures3BestAgu20.csv', header=0, index_col=None)
suresh_august20.head(100)
del suresh_august15['SK_ID_CURR']
del suresh_august16_2['SK_ID_CURR']
del suresh_august19['SK_ID_CURR_SURESH']
del suresh_august16['SK_ID_CURR']
del suresh_august19_2['SK_ID_CURR']
suresh_august15.head()
suresh_20 = pd.read_csv('../data/SureshFeatures20_2.csv', header=0, index_col=None)
suresh_20.head(100)
del suresh_20['SK_ID_CURR']
goranm_8_20 = pd.read_csv('../data/goranm_08_20.csv', header=0, index_col=None)
goranm_8_20.head()
del goranm_8_20['SK_ID_CURR']
def do_countuniq( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
    """Merge in a column *agg_name* holding the number of distinct *counted* values per *group_cols* group.

    Parameters
    ----------
    df : DataFrame to augment; a new frame with the extra column is returned.
    group_cols : list of column names to group by.
    counted : column whose distinct values are counted within each group.
    agg_name : name of the new feature column.
    agg_type : dtype the new column is cast to (default 'uint32').
    show_max : if True, print the maximum of the new column.
    show_agg : if True, print a progress message.
    """
    if show_agg:
        # Fixed typo in the progress message ("unqiue" -> "unique").
        print( "Counting unique ", counted, " by ", group_cols , '...' )
    gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return df
def do_mean(df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
    """Merge in a column *agg_name* with the per-group mean of *counted*, grouped by *group_cols*.

    Returns a new frame; the added column is cast to *agg_type*.
    Optionally prints a progress line (*show_agg*) and the column max (*show_max*).
    """
    if show_agg:
        print( "Calculating mean of ", counted, " by ", group_cols , '...' )
    means = (
        df[group_cols + [counted]]
        .groupby(group_cols)[counted]
        .mean()
        .reset_index()
        .rename(columns={counted: agg_name})
    )
    df = df.merge(means, on=group_cols, how='left')
    del means
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return df
def do_count(df, group_cols, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
    """Merge in a column *agg_name* with the row count of each *group_cols* group.

    Parameters
    ----------
    df : DataFrame to augment; a new frame with the extra column is returned.
    group_cols : list of column names to group by.
    agg_name : name of the new count column.
    agg_type : dtype the new column is cast to (default 'uint32').
    show_max : if True, print the maximum of the new column.
    show_agg : if True, print a progress message.
    """
    if show_agg:
        print( "Aggregating by ", group_cols , '...' )
    # The original double-selected the same columns (df[group_cols][group_cols]);
    # one selection is equivalent and avoids an extra intermediate frame.
    gp = df[group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    if show_max:
        print( agg_name + " max value = ", df[agg_name].max() )
    df[agg_name] = df[agg_name].astype(agg_type)
    gc.collect()
    return df
# Pairwise nunique features: for every ordered pair of distinct categorical
# columns, count distinct f_1 values within each f_0 group.
counts_columns = []
for f_0 in categorical_columns:
    for f_1 in [x for x in categorical_columns if x != f_0] :
        df = do_countuniq(df, [f_0], f_1,
                          f_0 + '-' + f_1 + '_cunique', 'uint16', show_max=True); gc.collect()
        counts_columns.append(f_0 + '-' + f_1 + '_cunique')
# Frequency features: row count per category value.
count_columns = []
for f_0 in categorical_columns:
    df = do_count(df, [f_0],
                  f_0 + '_count', 'uint16', show_max=True); gc.collect()
    count_columns.append(f_0 + '_count')
# Impute inf/NaN in the continuous columns with their (finite) medians before averaging.
for f in ['AMT_ANNUITY', 'AMT_CREDIT', 'AMT_INCOME_TOTAL'] + ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']:
    new_df[f] = new_df[f].replace([np.inf, -np.inf], np.nan).fillna(new_df[f].replace([np.inf, -np.inf], np.nan).dropna().median())
# Group-mean encodings: mean of each continuous column per categorical value.
# NOTE(review): agg_type 'uint16' truncates the fractional part of the means
# (and the EXT_SOURCE_* means, all < 1, collapse to 0) — confirm this is intended.
mean_columns = []
for f_0 in categorical_columns:
    for f_1 in ['AMT_ANNUITY', 'AMT_CREDIT', 'AMT_INCOME_TOTAL'] + ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3'] :
        new_df = do_mean(new_df, [f_0], f_1,
                         f_0 + '-' + f_1 + '_mean', 'uint16', show_max=True); gc.collect()
        mean_columns.append(f_0 + '-' + f_1 + '_mean')
# --- Earlier feature-matrix combinations, kept commented for reference ---
# train_features = pd.DataFrame(np.concatenate([df[count_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values, goran_features[0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values], axis=1), columns=
#                               count_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns))
# test_features = pd.DataFrame(np.concatenate([df[count_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values, goran_features[n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values], axis=1), columns=
#                              count_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns))
# train_features = np.concatenate([train_stacked.values, df[my_features][0:n_train].values, goran_features[0:n_train].values, suresh_august16[:n_train].values], axis=1)
# test_features = np.concatenate([test_stacked.values, df[my_features][n_train:].values, goran_features[n_train:].values, suresh_august16[n_train:].values], axis=1)
# train_features = pd.DataFrame(np.concatenate([train_stacked.values, df[my_features][0:n_train].values, goran_features[0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values, suresh_august16_2[0:n_train].values], axis=1), columns=
#                               ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns) + list(suresh_august16_2.columns))
# test_features = pd.DataFrame(np.concatenate([test_stacked.values, df[my_features][n_train:].values, goran_features[n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values, suresh_august16_2[n_train:].values], axis=1), columns=
#                              ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns) + list(suresh_august16_2.columns))
# train_features = pd.DataFrame(np.concatenate([train_stacked.values, df[my_features][0:n_train].values, goran_features[0:n_train].values, suresh_august19[:n_train].values, suresh_august15[0:n_train].values, prevs_df[0:n_train].values, suresh_august16[0:n_train].values, suresh_august16_2[0:n_train].values], axis=1), columns=
#                               ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august19.columns) + list(suresh_august15.columns) + list(prevs_df.columns) + list(suresh_august16.columns) + list(suresh_august16_2.columns))
# test_features = pd.DataFrame(np.concatenate([test_stacked.values, df[my_features][n_train:].values, goran_features[n_train:].values, suresh_august19[n_train:].values, suresh_august15[n_train:].values, prevs_df[n_train:].values, suresh_august16[n_train:].values, suresh_august16_2[n_train:].values], axis=1), columns=
#                              ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august19.columns) + list(suresh_august15.columns) + list(prevs_df.columns) + list(suresh_august16.columns) + list(suresh_august16_2.columns))
# train_features = pd.DataFrame(np.concatenate([df[count_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values, suresh_august19[:n_train].values, suresh_august15[0:n_train].values, prevs_df[0:n_train].values, suresh_august16[0:n_train].values, suresh_august16_2[0:n_train].values], axis=1), columns=
#                               count_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(suresh_august19.columns) + list(suresh_august15.columns) + list(prevs_df.columns) + list(suresh_august16.columns) + list(suresh_august16_2.columns))
# test_features = pd.DataFrame(np.concatenate([df[count_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values, suresh_august19[n_train:].values, suresh_august15[n_train:].values, prevs_df[n_train:].values, suresh_august16[n_train:].values, suresh_august16_2[n_train:].values], axis=1), columns=
#                              count_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(suresh_august19.columns) + list(suresh_august15.columns) + list(prevs_df.columns) + list(suresh_august16.columns) + list(suresh_august16_2.columns))
# Notebook cell echoes: eyeball the mean-encoded train/test slices.
new_df[mean_columns][0:n_train].values
new_df[mean_columns][n_train:].values
gc.collect()
# train_features = pd.DataFrame(np.concatenate([new_df[mean_columns][0:n_train].values, suresh_august16[0:n_train].values, df[count_columns][0:n_train].values , df[counts_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values], axis=1), columns=
#                               mean_columns + list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features)
# test_features = pd.DataFrame(np.concatenate([new_df[mean_columns][n_train:].values, suresh_august16[n_train:].values, df[count_columns][n_train:].values, df[counts_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values], axis=1), columns=
#                              mean_columns + list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features)
# train_features = pd.DataFrame(np.concatenate([suresh_august16[0:n_train].values, df[count_columns][0:n_train].values , df[counts_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values], axis=1), columns=
#                               list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features)
# test_features = pd.DataFrame(np.concatenate([ suresh_august16[n_train:].values, df[count_columns][n_train:].values, df[counts_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values], axis=1), columns=
#                              list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features)
# train_features = pd.DataFrame(np.concatenate([df[categorical_columns][0:n_train].values, goran_features_19_8[0:n_train].values, suresh_august16[0:n_train].values, df[count_columns][0:n_train].values , df[counts_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values], axis=1), columns=
#                               categorical_columns + list(goran_features_19_8.columns) + list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features)
# test_features = pd.DataFrame(np.concatenate([df[categorical_columns][n_train:].values, goran_features_19_8[n_train:].values, suresh_august16[n_train:].values, df[count_columns][n_train:].values, df[counts_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values], axis=1), columns=
#                              categorical_columns + list(goran_features_19_8.columns) + list(suresh_august16.columns) + count_columns + counts_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features)
# train_features = pd.DataFrame(np.concatenate([goranm_8_20[0:n_train].values ,goran_features_19_8[0:n_train].values, suresh_august20[0:n_train].values, train_stacked.values, df[my_features][0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values], axis=1), columns=
#                               list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
# test_features = pd.DataFrame(np.concatenate([goranm_8_20[n_train:].values, goran_features_19_8[n_train:].values, suresh_august20[n_train:].values, test_stacked.values, df[my_features][n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values], axis=1), columns=
#                              list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
# train_features = pd.DataFrame(np.concatenate([goranm_8_20[0:n_train].values ,goran_features_19_8[0:n_train].values, suresh_august20[0:n_train].values, train_stacked.iloc[:, selected_features].values, df[my_features][0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values], axis=1), columns=
#                               list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + ['y_' + str(i) for i in selected_features] + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
# test_features = pd.DataFrame(np.concatenate([goranm_8_20[n_train:].values, goran_features_19_8[n_train:].values, suresh_august20[n_train:].values, test_stacked.iloc[:, selected_features].values, df[my_features][n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values], axis=1), columns=
#                              list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + ['y_' + str(i) for i in selected_features] + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
# train_features = pd.DataFrame(np.concatenate([goran_features_19_8[0:n_train].values, df[count_columns][0:n_train].values, train_stacked.values, df[my_features][0:n_train].values, goran_features[0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values], axis=1), columns=
#                               list(goran_features_19_8.columns) + count_columns + ['y_' + str(i) for i in range(train_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns))
# test_features = pd.DataFrame(np.concatenate([goran_features_19_8[n_train:].values, df[count_columns][n_train:].values, test_stacked.values, df[my_features][n_train:].values, goran_features[n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values], axis=1), columns=
#                              list(goran_features_19_8.columns) + count_columns + ['y_' + str(i) for i in range(test_stacked.shape[1])] + my_features + list(goran_features.columns) + list(suresh_august16.columns) + list(suresh_august15.columns))
# Final feature matrices: horizontally concatenate the engineered columns and
# every teammate feature file, split at n_train into train/test halves.
train_features = pd.DataFrame(np.concatenate([df[counts_columns][0:n_train].values, df[count_columns][0:n_train].values ,new_df[mean_columns][0:n_train].values, prevs_df[0:n_train].values, suresh_20[0:n_train].values, goranm_8_20[0:n_train].values ,goran_features_19_8[0:n_train].values, suresh_august20[0:n_train].values, df[my_features][0:n_train].values, suresh_august16[:n_train].values, suresh_august15[0:n_train].values], axis=1), columns=
                              counts_columns + count_columns + mean_columns + list(prevs_df.columns) + list(suresh_20.columns) + list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
test_features = pd.DataFrame(np.concatenate([df[counts_columns][n_train:].values, df[count_columns][n_train:].values, new_df[mean_columns][n_train:].values, prevs_df[n_train:].values, suresh_20[n_train:].values, goranm_8_20[n_train:].values, goran_features_19_8[n_train:].values, suresh_august20[n_train:].values, df[my_features][n_train:].values, suresh_august16[n_train:].values, suresh_august15[n_train:].values], axis=1), columns=
                             counts_columns + count_columns + mean_columns + list(prevs_df.columns) + list(suresh_20.columns) + list(goranm_8_20.columns) + list(goran_features_19_8.columns) + list(suresh_august20.columns) + my_features + list(suresh_august16.columns) + list(suresh_august15.columns))
test_features.head()
gc.collect()
# Hand-curated list of columns to drop (presumably found unhelpful in earlier runs — TODO confirm provenance).
cols_to_drop = [
    'STCK_BERBAL_6_.',
    "FLAG_DOCUMENT_2",
    "FLAG_DOCUMENT_7",
    "FLAG_DOCUMENT_10",
    "FLAG_DOCUMENT_12",
    "FLAG_DOCUMENT_13",
    "FLAG_DOCUMENT_14",
    "FLAG_DOCUMENT_15",
    "FLAG_DOCUMENT_16",
    "FLAG_DOCUMENT_17",
    "FLAG_DOCUMENT_18",
    "FLAG_DOCUMENT_19",
    "FLAG_DOCUMENT_20",
    "FLAG_DOCUMENT_21",
    "PREV_NAME_CONTRACT_TYPE_Consumer_loans",
    "PREV_NAME_CONTRACT_TYPE_XNA",
    "PB_CNT_NAME_CONTRACT_STATUS_Amortized_debt",
    "MAX_DATA_ALL",
    "MIN_DATA_ALL",
    "MAX_MIN_DURATION",
    "MAX_AMT_CREDIT_MAX_OVERDUE",
    "CC_AMT_DRAWINGS_ATM_CURRENT_MIN",
    "CC_AMT_DRAWINGS_OTHER_CURRENT_MAX",
    "CC_AMT_DRAWINGS_OTHER_CURRENT_MIN",
    "CC_CNT_DRAWINGS_ATM_CURRENT_MIN",
    "CC_CNT_DRAWINGS_OTHER_CURRENT_MAX",
    "CC_CNT_DRAWINGS_OTHER_CURRENT_MIN",
    "CC_SK_DPD_DEF_MIN",
    "CC_SK_DPD_MIN",
    "BERB_STATUS_CREDIT_TYPE_Loan_for_working_capital_replenishment",
    "BERB_STATUS_CREDIT_TYPE_Real_estate_loan",
    "BERB_STATUS_CREDIT_TYPE_Loan_for_the_purchase_of_equipment",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_Loan_for_working_capital_replenishmentClosed",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_Car_loanSold",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_Another_type_of_loanActive",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_Loan_for_working_capital_replenishmentSold",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_MicroloanSold",
    "BERB_COMBO_CT_CA_COMBO_CT_CA_Another_type_of_loanSold",
    "FLAG_EMAIL",
    "APARTMENTS_AVG",
    "AMT_REQ_CREDIT_BUREAU_MON",
    "AMT_REQ_CREDIT_BUREAU_QRT",
    "AMT_REQ_CREDIT_BUREAU_YEAR",
    "STCK_BERBAL_6_",
    "STCK_CC_6_x"]
# Drop only the columns that actually exist in the assembled matrix.
feats = [f for f in cols_to_drop if f in train_features.columns]
train_features.drop(labels=feats, axis=1, inplace=True)
test_features.drop(labels=feats, axis=1, inplace=True)
cat_features = [] # [i for i in range(len(categorical_columns))]
gc.collect()
# train_stacked.to_csv('oofs/train_oofs-v0.1.0.csv', index=False)
# test_stacked.to_csv('oofs/test_oofs-v0.1.0.csv', index=False)
test_features.head()
# Extra feature: per-row count of missing/inf values.
train_features['nans'] = train_features.replace([np.inf, -np.inf], np.nan).isnull().sum(axis=1)
test_features['nans'] = test_features.replace([np.inf, -np.inf], np.nan).isnull().sum(axis=1)
# Output locations for the level-1 stack predictions.
test_file_path = "Level_1_stack/test_catb_xxx_0.csv"
# NOTE(review): doubled ".csv.csv" extension — likely a typo; verify downstream consumers before changing.
validation_file_path = 'Level_1_stack/validation_catb_xxx_0.csv.csv'
num_folds = 5
# train_features = train_features.replace([np.inf, -np.inf], np.nan).fillna(-999, inplace=False)
# test_features = test_features.replace([np.inf, -np.inf], np.nan).fillna(-999, inplace=False)
gc.collect()
encoding = 'ohe'
train_df = train_features
test_df = test_features
# NOTE(review): message says LightGBM but the model fit below is CatBoostRegressor.
print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
gc.collect()
# Cross validation model
folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)
# Create arrays and dataframes to store results
oof_preds = np.zeros(train_df.shape[0])  # out-of-fold predictions on the train rows
sub_preds = np.zeros(test_df.shape[0])   # fold-averaged predictions on the test rows
feature_importance_df = pd.DataFrame()
# Exclude id/index/label columns from the model inputs.
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
#feats = [col for col in feats_0 if df[col].dtype == 'object']
print(train_df[feats].shape)
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train['TARGET'])):
    if encoding == 'ohe':
        x_train = train_df[feats].iloc[train_idx]
        #cat_features = [i for i, col in enumerate(x_train.columns) if col in categorical_cols]
        # inf/NaN -> sentinel -999 so CatBoost sees fully numeric matrices.
        x_train = x_train.replace([np.inf, -np.inf], np.nan).fillna(-999).values
        x_valid = train_df[feats].iloc[valid_idx].replace([np.inf, -np.inf], np.nan).fillna(-999).values
        x_test = test_df[feats].replace([np.inf, -np.inf], np.nan).fillna(-999).values
        print(x_train.shape, x_valid.shape, x_test.shape)
    gc.collect()
    # Regressor with RMSE on the 0/1 target: produces continuous scores for
    # stacking; the fold score is still reported as AUC below.
    clf = CatBoostRegressor(learning_rate=0.05, iterations=2500, verbose=True, rsm=0.25,
                            use_best_model=True, l2_leaf_reg=40, allow_writing_files=False, metric_period=50,
                            random_seed=666, depth=6, loss_function='RMSE', od_wait=50, od_type='Iter')
    clf.fit(x_train, train['TARGET'].iloc[train_idx].values, eval_set=(x_valid, train['TARGET'].iloc[valid_idx].values)
            , cat_features=[], use_best_model=True, verbose=True)
    oof_preds[valid_idx] = clf.predict(x_valid)
    sub_preds += clf.predict(x_test) / folds.n_splits
    print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(train['TARGET'].iloc[valid_idx].values, oof_preds[valid_idx])))
    del clf
    gc.collect()
# Write the fold-averaged test predictions and the out-of-fold train predictions.
sub_df = test[['SK_ID_CURR']].copy()
sub_df['TARGET'] = sub_preds
sub_df[['SK_ID_CURR', 'TARGET']].to_csv(test_file_path, index= False)
val_df = train[['SK_ID_CURR', 'TARGET']].copy()
val_df['TARGET'] = oof_preds
val_df[['SK_ID_CURR', 'TARGET']].to_csv(validation_file_path, index= False)
gc.collect()
```
| github_jupyter |
```
%pylab inline
import sys
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import arviz as az
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.font_manager as fm
rcParams['font.family'] = 'sans-serif'
sys.path.append('../')
from mederrata_spmf import PoissonMatrixFactorization
```
In this notebook, we look at the $\mathcal{M}$-open setting, where the generating process is in the span of models.
# Generate a random matrices V, W
For V, assume that 10 variables share a factor structure and the other 20 are noise
```
# Simulation setup: N samples; 10 variables share a latent factor structure
# (D_factor) while 20 are pure noise (D_noise); P latent factors.
N = 50000
D_factor = 10
D_noise = 20
D = D_factor + D_noise
P = 3
# Non-negative loading matrix V (P x D_factor) and non-negative scores Z (N x P).
V = np.abs(np.random.normal(1.5, 0.5, size=(P,D_factor)))
Z = np.abs(np.random.normal(0, 1, size=(N,P)))
ZV = Z.dot(V)
X = np.zeros((N, D_factor+D_noise))
# Start from Poisson(1) noise in all 30 columns ...
X = np.random.poisson(1.,size=(N,D_noise+D_factor))
# ... then overwrite every third column (10 columns: 0, 3, ..., 27) with
# counts drawn from the factor-driven rates, interleaving signal and noise.
X[:, ::3] = np.random.poisson(ZV)
# Test taking in from tf.dataset, don't pre-batch
data = tf.data.Dataset.from_tensor_slices(
    {
        'counts': X,
        'indices': np.arange(N),
        'normalization': np.ones(N)
    })
data = data.batch(1000)
# strategy = tf.distribute.MirroredStrategy()
strategy = None
factor = PoissonMatrixFactorization(
    data, latent_dim=P, strategy=strategy,
    u_tau_scale=1.0/np.sqrt(D*N),
    dtype=tf.float64)
# Test to make sure sampling works.
# Fit the surrogate posterior via ADVI, then score the fit with WAIC.
losses = factor.calibrate_advi(
    num_epochs=200, learning_rate=.05)
waic = factor.waic()
print(waic)
surrogate_samples = factor.surrogate_distribution.sample(1000)
# If the surrogate includes scale variables 's', weight the intercept 'w'
# by the normalized scales before plotting; otherwise plot 'w' directly.
if 's' in surrogate_samples.keys():
    weights = surrogate_samples['s']/tf.reduce_sum(surrogate_samples['s'],-2,keepdims=True)
    intercept_data = az.convert_to_inference_data(
        {
            r"$\varphi_i/\eta_i$":
            (tf.squeeze(surrogate_samples['w'])*weights[:,-1,:]).numpy().T})
else:
    intercept_data = az.convert_to_inference_data(
        {
            r"$\varphi_i/\eta_i$":
            (tf.squeeze(surrogate_samples['w'])).numpy().T})
# Left panel: learned encoding matrix; right panel: forest plot of the
# per-item background rates.
fig, ax = plt.subplots(1,2, figsize=(14,8))
D = factor.feature_dim
pcm = ax[0].imshow(factor.encoding_matrix().numpy()[::-1,:], vmin=0, cmap="Blues")
ax[0].set_yticks(np.arange(D))
ax[0].set_yticklabels(np.arange(D))
ax[0].set_ylabel("item")
ax[0].set_xlabel("factor dimension")
ax[0].set_xticks(np.arange(P))
ax[0].set_xticklabels(np.arange(P))
fig.colorbar(pcm, ax=ax[0], orientation = "vertical")
az.plot_forest(intercept_data, ax=ax[1])
ax[1].set_xlabel("background rate")
ax[1].set_ylim((-0.014,.466))
ax[1].set_title("65% and 95% CI")
#plt.savefig('mix_nonlinear_factorization_sepmf.pdf', bbox_inches='tight')
plt.show()
```
| github_jupyter |
Final models with hyperparameters tuned for Logistics Regression and XGBoost with all features.
```
#Import the libraries
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn import linear_model, metrics, preprocessing, model_selection
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
#Load the data
modeling_dataset = pd.read_csv('/content/drive/MyDrive/prediction/frac_cleaned_fod_data.csv', low_memory = False)
#All columns - except 'HasDetections', 'kfold', and 'MachineIdentifier'
train_features = [tf for tf in modeling_dataset.columns if tf not in ('HasDetections', 'kfold', 'MachineIdentifier')]
#The features selected based on the feature selection method earlier employed
train_features_after_selection = ['AVProductStatesIdentifier', 'Processor','AvSigVersion', 'Census_TotalPhysicalRAM', 'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_IsVirtualDevice', 'Census_PrimaryDiskTotalCapacity', 'Wdft_IsGamer', 'Census_IsAlwaysOnAlwaysConnectedCapable', 'EngineVersion',
'Census_ProcessorCoreCount', 'Census_OSEdition', 'Census_OSInstallTypeName', 'Census_OSSkuName', 'AppVersion', 'OsBuildLab', 'OsSuite',
'Firewall', 'IsProtected', 'Census_IsTouchEnabled', 'Census_ActivationChannel', 'LocaleEnglishNameIdentifier','Census_SystemVolumeTotalCapacity',
'Census_InternalPrimaryDisplayResolutionHorizontal','Census_HasOpticalDiskDrive', 'OsBuild', 'Census_InternalPrimaryDisplayResolutionVertical',
'CountryIdentifier', 'Census_MDC2FormFactor', 'GeoNameIdentifier', 'Census_PowerPlatformRoleName', 'Census_OSWUAutoUpdateOptionsName', 'SkuEdition',
'Census_OSVersion', 'Census_GenuineStateName', 'Census_OSBuildRevision', 'Platform', 'Census_ChassisTypeName', 'Census_FlightRing',
'Census_PrimaryDiskTypeName', 'Census_OSBranch', 'Census_IsSecureBootEnabled', 'OsPlatformSubRelease']
#Define the categorical features of the data
categorical_features = ['ProductName',
'EngineVersion',
'AppVersion',
'AvSigVersion',
'Platform',
'Processor',
'OsVer',
'OsPlatformSubRelease',
'OsBuildLab',
'SkuEdition',
'Census_MDC2FormFactor',
'Census_DeviceFamily',
'Census_PrimaryDiskTypeName',
'Census_ChassisTypeName',
'Census_PowerPlatformRoleName',
'Census_OSVersion',
'Census_OSArchitecture',
'Census_OSBranch',
'Census_OSEdition',
'Census_OSSkuName',
'Census_OSInstallTypeName',
'Census_OSWUAutoUpdateOptionsName',
'Census_GenuineStateName',
'Census_ActivationChannel',
'Census_FlightRing']
#XGBoost
def opt_run_xgboost(fold):
for col in train_features:
if col in categorical_features:
#Initialize the Label Encoder
lbl = preprocessing.LabelEncoder()
#Fit on the categorical features
lbl.fit(modeling_dataset[col])
#Transform
modeling_dataset.loc[:,col] = lbl.transform(modeling_dataset[col])
#Get training and validation data using folds
modeling_datasets_train = modeling_dataset[modeling_dataset.kfold != fold].reset_index(drop=True)
modeling_datasets_valid = modeling_dataset[modeling_dataset.kfold == fold].reset_index(drop=True)
#Get train data
X_train = modeling_datasets_train[train_features].values
#Get validation data
X_valid = modeling_datasets_valid[train_features].values
#Initialize XGboost model
xgb_model = xgb.XGBClassifier(
alpha= 1.0,
colsample_bytree= 0.6,
eta= 0.05,
gamma= 0.1,
lamda= 1.0,
max_depth= 9,
min_child_weight= 5,
subsample= 0.7,
n_jobs=-1)
#Fit the model on training data
xgb_model.fit(X_train, modeling_datasets_train.HasDetections.values)
#Predict on validation
valid_preds = xgb_model.predict_proba(X_valid)[:,1]
valid_preds_pc = xgb_model.predict(X_valid)
#Get the ROC AUC score
auc = metrics.roc_auc_score(modeling_datasets_valid.HasDetections.values, valid_preds)
#Get the precision score
pre = metrics.precision_score(modeling_datasets_valid.HasDetections.values, valid_preds_pc, average='binary')
#Get the Recall score
rc = metrics.recall_score(modeling_datasets_valid.HasDetections.values, valid_preds_pc, average='binary')
return auc, pre, rc
#Function for Logistic Regression Classification
def opt_run_lr(fold):
#Get training and validation data using folds
cleaned_fold_datasets_train = modeling_dataset[modeling_dataset.kfold != fold].reset_index(drop=True)
cleaned_fold_datasets_valid = modeling_dataset[modeling_dataset.kfold == fold].reset_index(drop=True)
#Initialize OneHotEncoder from scikit-learn, and fit it on training and validation features
ohe = preprocessing.OneHotEncoder()
full_data = pd.concat(
[cleaned_fold_datasets_train[train_features],cleaned_fold_datasets_valid[train_features]],
axis = 0
)
ohe.fit(full_data[train_features])
#transform the training and validation data
x_train = ohe.transform(cleaned_fold_datasets_train[train_features])
x_valid = ohe.transform(cleaned_fold_datasets_valid[train_features])
#Initialize the Logistic Regression Model
lr_model = linear_model.LogisticRegression(
penalty= 'l2',
C = 49.71967742639108,
solver= 'lbfgs',
max_iter= 300,
n_jobs=-1
)
#Fit model on training data
lr_model.fit(x_train, cleaned_fold_datasets_train.HasDetections.values)
#Predict on the validation data using the probability for the AUC
valid_preds = lr_model.predict_proba(x_valid)[:, 1]
#For precision and Recall
valid_preds_pc = lr_model.predict(x_valid)
#Get the ROC AUC score
auc = metrics.roc_auc_score(cleaned_fold_datasets_valid.HasDetections.values, valid_preds)
#Get the precision score
pre = metrics.precision_score(cleaned_fold_datasets_valid.HasDetections.values, valid_preds_pc, average='binary')
#Get the Recall score
rc = metrics.recall_score(cleaned_fold_datasets_valid.HasDetections.values, valid_preds_pc, average='binary')
return auc, pre, rc
#A list to hold the values of the XGB performance metrics
xg = []
for fold in tqdm(range(10)):
    xg.append(opt_run_xgboost(fold))
#Run the Logistic regression model for all folds and hold their values
lr = []
for fold in tqdm(range(10)):
    lr.append(opt_run_lr(fold))
# Per-metric lists used to average each score over the 10 folds.
xgb_auc = []
xgb_pre = []
xgb_rc = []
lr_auc = []
lr_pre = []
lr_rc = []
#Loop to get each of the performance metric for average computation
for i in lr:
    lr_auc.append(i[0])
    lr_pre.append(i[1])
    lr_rc.append(i[2])
# FIX: this loop previously appended i[0]/i[1]/i[2] — the leftover loop
# variable from the LR loop above — so every "XGB" metric was actually a
# copy of the last logistic-regression fold.  Use the loop variable j.
for j in xg:
    xgb_auc.append(j[0])
    xgb_pre.append(j[1])
    xgb_rc.append(j[2])
#Dictionary to hold the basic model performance data
final_model_performance2 = {"logistic_regression": {"auc":"", "precision":"", "recall":""},
                            "xgb": {"auc":"","precision":"","recall":""}
                            }
#Calculate average of each of the lists of performance metrics and update the dictionary
final_model_performance2['logistic_regression'].update({'auc':sum(lr_auc)/len(lr_auc)})
final_model_performance2['xgb'].update({'auc':sum(xgb_auc)/len(xgb_auc)})
final_model_performance2['logistic_regression'].update({'precision':sum(lr_pre)/len(lr_pre)})
final_model_performance2['xgb'].update({'precision':sum(xgb_pre)/len(xgb_pre)})
final_model_performance2['logistic_regression'].update({'recall':sum(lr_rc)/len(lr_rc)})
final_model_performance2['xgb'].update({'recall':sum(xgb_rc)/len(xgb_rc)})
final_model_performance2
```
| github_jupyter |
Imagine you are a metal toy producer and want to package your products automatically.
In this case it would be nice to categorise your products without much effort.
In this example we use a pretrained model ('Xception' with 'imagenet' dataset).
## Import dependencies
```
import warnings
warnings.filterwarnings('ignore')
import sys
import pathlib
current_path = pathlib.Path().absolute()
root_path = "{0}/..".format(current_path)
sys.path.append("{0}/src".format(root_path))
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model
import backbones
import utils.plots as plots
from train_engine import TrainEngine
from utils import load_dataset, ImageGeneratorConfig, setup_environment, export_util
setup_environment(enable_gpu=True)
```
## Prepare training and evaluation
As we have only a few images, we need to augment them to get more input for our neural network.
```
train_files_path = "{0}/img/space_ships/train".format(root_path)
eval_files_path = "{0}/img/space_ships/eval".format(root_path)
input_shape = (138, 256, 3)
generator_config = ImageGeneratorConfig()
generator_config.loop_count = 10
generator_config.horizontal_flip = True
generator_config.zoom_range = 0.5
generator_config.width_shift_range = 0.03
generator_config.height_shift_range = 0.03
generator_config.rotation_range = 180
train_x, train_y, eval_x, eval_y = load_dataset(
train_files_path, input_shape, validation_split=0.1
)
number_of_classes = 3
```
## Create model
```
# Build a transfer-learning model: Xception backbone (ImageNet weights,
# no classification head) + global average pooling + a small dense head
# ending in a softmax over the target classes.
base_model = Xception(include_top=False, weights='imagenet', input_shape=input_shape)
base_layers_count = len(base_model.layers)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dense(number_of_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=x)
# FIX: 'lr' is a deprecated alias removed in newer Keras; 'learning_rate'
# is accepted by all TF2 versions.
optimizer = Adam(learning_rate=0.001)
```
## Train model
First we will teach the model the new classes.
```
for layer in base_model.layers:
layer.trainable = False
train_engine = TrainEngine(
input_shape,
model,
optimizer,
loss="sparse_categorical_crossentropy"
)
```
### Train
```
loss, acc, val_loss, val_acc = train_engine.train(
train_x,
train_y,
eval_x,
eval_y,
epochs=70,
batch_size=32,
image_generator_config=generator_config,
is_augment_y_enabled=False,
is_classification=True
)
```
### Show history
```
plots.plot_history(loss, acc, val_loss, val_acc)
```
Now we fine tune the convolutional layers from the base model.
This will remove connections between neurons that are not used and also create new ones.
```
# Fine-tuning phase with a low-learning-rate SGD.
# NOTE(review): base_layers_count == len(base_model.layers) (set above), so
# the first loop freezes *every* backbone layer and the second unfreezes
# only the newly added head — no convolutional layers of the base model
# are actually fine-tuned, contrary to the markdown description.  Confirm
# whether a smaller cutoff index was intended.
for layer in base_model.layers[:base_layers_count]:
    layer.trainable = False
for layer in model.layers[base_layers_count:]:
    layer.trainable = True
# Small learning rate with momentum for the fine-tuning stage.
optimizer = SGD(lr=0.0001, momentum=0.9)
train_engine = TrainEngine(
    input_shape,
    model,
    optimizer,
    loss="sparse_categorical_crossentropy"
)
# Shorter second training run on the same augmented data.
loss, acc, val_loss, val_acc = train_engine.train(
    train_x,
    train_y,
    eval_x,
    eval_y,
    epochs=20,
    batch_size=32,
    image_generator_config=generator_config,
    is_augment_y_enabled=False,
    is_classification=True
)
```
## Predict
```
classes = ['Millenium Falcon', 'Pelican', 'TIE Fighter']
x, _, _, _ = load_dataset(
eval_files_path, input_shape, validation_split=0
)
for idx in range(len(x[:3])):
predictions = train_engine.model.predict(
np.array([x[idx]], dtype=np.float32), batch_size=1
)
plots.plot_classification(predictions, [x[idx]], input_shape, classes)
```
### Export model
```
export_path = "{0}/saved_models/space_ships".format(root_path)
export_util.export_model(model, export_path)
```
## Cleanup
```
K.clear_session()
```
| github_jupyter |
```
%matplotlib inline
```
# Linear classifier on sensor data with plot patterns and filters
Decoding, a.k.a MVPA or supervised machine learning applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources which are extracted by the filters.
Note patterns/filters in MEG data are more similar than EEG data
because the noise is less spatially correlated in MEG than EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
```
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
```
Set parameters
```
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.copy().pick_types(meg=False, eeg=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
```
Decoding in sensor space using a LogisticRegression classifier
```
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
```
| github_jupyter |
```
# CHALLENGE PROBLEM:
#
# Use your check_sudoku function as the basis for solve_sudoku(): a
# function that takes a partially-completed Sudoku grid and replaces
# each 0 cell with an integer in the range 1..9 in such a way that the
# final grid is valid.
#
# There are many ways to cleverly solve a partially-completed Sudoku
# puzzle, but a brute-force recursive solution with backtracking is a
# perfectly good option. The solver should return None for broken
# input, False for inputs that have no valid solutions, and a valid
# 9x9 Sudoku grid containing no 0 elements otherwise. In general, a
# partially-completed Sudoku grid does not have a unique solution. You
# should just return some member of the set of solutions.
#
# A solve_sudoku() in this style can be implemented in about 16 lines
# without making any particular effort to write concise code.
# solve_sudoku should return None
ill_formed = [[5,3,4,6,7,8,9,1,2],
[6,7,2,1,9,5,3,4,8],
[1,9,8,3,4,2,5,6,7],
[8,5,9,7,6,1,4,2,3],
[4,2,6,8,5,3,7,9], # <---
[7,1,3,9,2,4,8,5,6],
[9,6,1,5,3,7,2,8,4],
[2,8,7,4,1,9,6,3,5],
[3,4,5,2,8,6,1,7,9]]
# solve_sudoku should return valid unchanged
valid = [[5,3,4,6,7,8,9,1,2],
[6,7,2,1,9,5,3,4,8],
[1,9,8,3,4,2,5,6,7],
[8,5,9,7,6,1,4,2,3],
[4,2,6,8,5,3,7,9,1],
[7,1,3,9,2,4,8,5,6],
[9,6,1,5,3,7,2,8,4],
[2,8,7,4,1,9,6,3,5],
[3,4,5,2,8,6,1,7,9]]
# solve_sudoku should return False
invalid = [[5,3,4,6,7,8,9,1,2],
[6,7,2,1,9,5,3,4,8],
[1,9,8,3,8,2,5,6,7], # <------ 8 appears two times
[8,5,9,7,6,1,4,2,3],
[4,2,6,8,5,3,7,9,1],
[7,1,3,9,2,4,8,5,6],
[9,6,1,5,3,7,2,8,4],
[2,8,7,4,1,9,6,3,5],
[3,4,5,2,8,6,1,7,9]]
# solve_sudoku should return a
# sudoku grid which passes a
# sudoku checker. There may be
# multiple correct grids which
# can be made from this starting
# grid.
easy = [[2,9,0,0,0,0,0,7,0],
[3,0,6,0,0,8,4,0,0],
[8,0,0,0,4,0,0,0,2],
[0,2,0,0,3,1,0,0,7],
[0,0,0,0,8,0,0,0,0],
[1,0,0,9,5,0,0,6,0],
[7,0,0,0,9,0,0,0,1],
[0,0,1,2,0,0,3,0,6],
[0,3,0,0,0,0,0,5,9]]
# Note: this may timeout
# in the Udacity IDE! Try running
# it locally if you'd like to test
# your solution with it.
hard = [[1,0,0,0,0,7,0,9,0],
[0,3,0,0,2,0,0,0,8],
[0,0,9,6,0,0,5,0,0],
[0,0,5,3,0,0,9,0,0],
[0,1,0,0,8,0,0,0,2],
[6,0,0,0,0,4,0,0,0],
[3,0,0,0,0,0,0,1,0],
[0,4,0,0,0,0,0,0,7],
[0,0,7,0,0,0,3,0,0]]
import copy
def columns(grid):
    """Return the nine columns of *grid* as lists (i.e. the transpose)."""
    transposed = []
    for col_idx in range(9):
        transposed.append([row[col_idx] for row in grid])
    return transposed
def subgrids(grid):
    """Return the nine 3x3 sub-grids, each flattened row-major, ordered
    left-to-right within each band and top band first."""
    boxes = []
    for top in (0, 3, 6):
        for left in (0, 3, 6):
            boxes.append([grid[top + dr][left + dc]
                          for dr in range(3) for dc in range(3)])
    return boxes
def check_sudoku(grid):
    """Validate a (possibly partial) Sudoku grid.

    Returns None for malformed input (anything that is not a 9x9 list of
    lists of ints in 0..9), False when some digit 1..9 repeats within a
    row, column or 3x3 sub-grid, and True otherwise.  Zeros mark unsolved
    cells and may repeat freely.
    """
    # Structural validation: reject anything that is not 9x9 ints in 0..9.
    if not isinstance(grid, list) or len(grid) != 9:
        return None
    if not all(isinstance(row, list) and len(row) == 9 for row in grid):
        return None
    if not all(isinstance(cell, int) and 0 <= cell <= 9
               for row in grid for cell in row):
        return None
    # Each digit 1..9 may appear at most once per row, column and sub-grid.
    for unit_group in (grid, columns(grid), subgrids(grid)):
        for unit in unit_group:
            if any(unit.count(digit) > 1 for digit in range(1, 10)):
                return False
    return True
def create_puzzle(grid):
    """Turn an int grid into a grid of candidate sets.

    Zeros (unsolved cells) become the full candidate set {1..9}; solved
    cells become single-element sets.
    """
    puzzle = [[] for _ in range(9)]
    for row_idx, row in enumerate(grid):
        for value in row:
            candidates = set(range(1, 10)) if value == 0 else {value}
            puzzle[row_idx].append(candidates)
    return puzzle
def convert_to_grid(puzzle):
    """Convert a grid of candidate sets back to an int grid.

    Single-candidate cells yield their value; cells with several remaining
    candidates become 0 (unsolved).
    """
    grid = [[] for _ in range(9)]
    for row_idx, row in enumerate(puzzle):
        for cell in row:
            grid[row_idx].append(next(iter(cell)) if len(cell) == 1 else 0)
    return grid
def solved_cells(puzzle):
    """Count cells whose candidate set has been narrowed to a single value."""
    count = 0
    for row in puzzle:
        for cell in row:
            if len(cell) == 1:
                count += 1
    return count
def first_stage_solve(puzzle):
    """Helper function that eliminates possible values from unsolved cells, based on existing solved
    cells in corresponding row, column and subgrid and returns the result (a new grid)."""
    # NOTE: columns()/subgrids() are recomputed for every unsolved cell —
    # quadratic work, but negligible on a fixed 9x9 board.
    result = [[], [], [], [], [], [], [], [], []]
    for i, row in enumerate(puzzle):
        for j, cell in enumerate(row):
            if len(cell) > 1:
                # construct a set of solved cell values in row i
                solved_row_values = set([]).union(*[elem for elem in row if len(elem) == 1])
                # construct a set of solved cell values in column j
                solved_column_values = set([]).union(*[elem for elem in columns(puzzle)[j] if len(elem) == 1])
                # construct a set of solved cell values in corresponding subgrid;
                # 3*(i//3) + j//3 maps (row, col) to the subgrid index in the
                # horizontal ordering produced by subgrids()
                solved_subgrid_values = set([]).union(*[elem for elem in subgrids(puzzle)[3*(i//3) + j//3] if len(elem) == 1])
                # construct the set of not permitted values for cell
                not_permitted = solved_row_values | solved_column_values | solved_subgrid_values
                # eliminate not permitted values and append to result
                result[i].append(cell.difference(not_permitted))
            else:
                # solved cells are carried over unchanged
                result[i].append(cell)
    return result
def second_stage_solve(puzzle):
    """Helper function that solves unsolved cells, by checking for uniqueness of allowed values
    for the cell in corresponding row and column and returns the result.

    If a candidate appears in no other cell of the row (or column), the
    cell must hold it ("hidden single").  Mutates puzzle in place.
    """
    # check rows
    for i, row in enumerate(puzzle):
        for j, cell in enumerate(row):
            if len(cell) > 1:
                # all candidates appearing anywhere else in this row
                other_cell_values = set([]).union(*[elem for counter, elem in enumerate(row) if counter != j])
                for value in cell:
                    if not value in other_cell_values:
                        puzzle[i][j] = set([value])
                        break
    # check columns
    for j, column in enumerate(columns(puzzle)):
        for i, cell in enumerate(column):
            if len(cell) > 1:
                # all candidates appearing anywhere else in this column
                other_cell_values = set([]).union(*[elem for counter, elem in enumerate(column) if counter != i])
                for value in cell:
                    if not value in other_cell_values:
                        puzzle[i][j] = set([value])
                        break
    return puzzle
def pairs_of_two(row_column_subgrid):
    """Return the positions (i, j) of the first identical pair of
    2-candidate cells in the given row/column/subgrid, or None if no such
    pair exists."""
    for first, cell in enumerate(row_column_subgrid):
        if len(cell) != 2:
            continue
        if row_column_subgrid.count(cell) == 2:
            # the loop reaches the first occurrence first, so the partner
            # lies strictly to the right
            second = row_column_subgrid.index(cell, first + 1)
            return (first, second)
    return None
def third_stage_solve(puzzle):
    """Helper function that scans each row, column and subgrid for pairs of cells with same 2-set
    of possible solutions and eliminates these values from remaining cells of the corresponding
    row, column or subgrid ("naked pair").  Mutates puzzle in place.

    Only the first such pair per unit is handled each pass; later pairs are
    picked up on subsequent passes of the solver loop.
    """
    # scan rows
    for i, row in enumerate(puzzle):
        pair_positions = pairs_of_two(row)
        if pair_positions:
            for j, cell in enumerate(row):
                if j not in pair_positions:
                    puzzle[i][j] = cell.difference(row[pair_positions[0]])
    # scan columns
    for j, column in enumerate(columns(puzzle)):
        pair_positions = pairs_of_two(column)
        if pair_positions:
            for i, cell in enumerate(column):
                if i not in pair_positions:
                    puzzle[i][j] = cell.difference(column[pair_positions[0]])
    # scan subgrids
    for i, subgrid in enumerate(subgrids(puzzle)):
        pair_positions = pairs_of_two(subgrid)
        if pair_positions:
            for j, cell in enumerate(subgrid):
                if j not in pair_positions:
                    # map (subgrid index i, position j within the subgrid)
                    # back to the puzzle's (row, column) coordinates
                    puzzle[3*(i//3) + j//3][3*(i%3) + j%3] = cell.difference(subgrid[pair_positions[0]])
    return puzzle
def logic_solve(puzzle):
    """Run the three deduction passes repeatedly until a full pass solves
    no additional cell, then return the (possibly partial) puzzle."""
    progress = True
    while progress:
        before = solved_cells(puzzle)
        puzzle = first_stage_solve(puzzle)
        puzzle = second_stage_solve(puzzle)
        puzzle = third_stage_solve(puzzle)
        progress = solved_cells(puzzle) != before
    return puzzle
def backtracking_solve(puzzle):
    """Helper function that solves the puzzle recursively using backtracking.

    Finds the first unsolved cell, tries each candidate value on a deep
    copy, runs the logic solver on the guess, and recurses.  Returns a
    fully solved puzzle, or None when every candidate for the examined
    cell leads to a contradiction (the caller then backtracks).
    """
    for i, row in enumerate(puzzle):
        for j, cell in enumerate(row):
            if len(cell) > 1:
                for value in cell:
                    # puzzle is a compound list, so a slice copy wouldn't work
                    # here as the original puzzle would be affected
                    possible_solution = copy.deepcopy(puzzle)
                    possible_solution[i][j] = set([value])
                    possible_solution = logic_solve(possible_solution)
                    if solved_cells(possible_solution) == 81:
                        # fully assigned: accept only if it is a valid grid
                        if check_sudoku(convert_to_grid(possible_solution)):
                            return possible_solution
                        else:
                            continue
                    elif not all([len(elem) > 0 for row in possible_solution for elem in row]):
                        # some cell lost all candidates -> contradiction,
                        # try the next value
                        continue
                    else:
                        # still partial and consistent: recurse deeper
                        possible_solution = backtracking_solve(possible_solution)
                        if possible_solution is not None:
                            return possible_solution
                        else:
                            continue
                # Possible values for cell exhausted. Either we backtrack or the cell examined
                # was the last one and the puzzle has no solution
                return None
def solve_sudoku(grid):
    """Solve a partially-completed Sudoku grid.

    Returns None for malformed input, False when no solution exists, and
    a completed valid 9x9 grid otherwise (one member of the solution set).
    """
    # input validation and early exits
    if check_sudoku(grid) is None:
        return None
    elif not check_sudoku(grid):
        return False
    elif all([elem != 0 for row in grid for elem in row]):
        # already complete and valid: return unchanged
        return grid
    # try pure constraint propagation first
    puzzle = logic_solve(create_puzzle(grid))
    if solved_cells(puzzle) == 81:
        result = convert_to_grid(puzzle)
        assert check_sudoku(result)
        return result
    elif not all([len(elem) > 0 for row in puzzle for elem in row]):
        # a cell has no remaining candidates: the puzzle is unsolvable
        return False
    else:
        # fall back to recursive guess-and-check
        puzzle = backtracking_solve(puzzle)
        if puzzle is None:
            return False
        else:
            result = convert_to_grid(puzzle)
            assert check_sudoku(result)
            return result
# print(solve_sudoku(ill_formed))  # --> None
# print(solve_sudoku(valid))       # --> valid, unchanged
# print(solve_sudoku(invalid))     # --> False
# Two harder starting grids used as smoke tests for the solver.
test1 = [[8,0,0,0,0,0,0,0,0],
         [0,0,3,6,0,0,0,0,0],
         [0,7,0,0,9,0,2,0,0],
         [0,5,0,0,0,7,0,0,0],
         [0,0,0,0,4,5,7,0,0],
         [0,0,0,1,0,0,0,3,0],
         [0,0,1,0,0,0,0,6,8],
         [0,0,8,5,0,0,0,1,0],
         [0,9,0,0,0,0,4,0,0]]
test2 = [[7,0,0,2,0,0,0,0,0],
         [8,0,0,0,0,0,0,4,0],
         [0,0,0,1,0,0,0,0,0],
         [0,1,6,5,0,0,2,0,0],
         [0,0,0,0,0,0,0,7,0],
         [0,2,0,0,0,0,0,0,0],
         [0,0,0,0,0,0,6,0,1],
         [3,0,0,0,4,0,0,0,0],
         [0,5,0,0,8,0,0,0,0]]
# FIX: these lines used Python 2 print statements ('print row'), which are
# a SyntaxError under Python 3 (the rest of this file uses print()).
for row in solve_sudoku(easy):
    print(row)
print('')
for row in solve_sudoku(test1):
    print(row)
| github_jupyter |
```
import librosa

SAMPLE_1_FILE = 'data/sample1.wav'
SAMPLE_2_FILE = 'data/sample2.wav'
# Path to a bundled example clip (fetched on first use).
filename = librosa.example('nutcracker')
print(filename)
# y, sr = librosa.load(filename)
# Load the waveform (y) and its sampling rate (sr).
y, sr = librosa.load(SAMPLE_1_FILE)
print('waveform (y): ', type(y), len(y))
print(f'sampling rate {sr}')
# beat tracker
# FIX: librosa >= 0.10 makes these arguments keyword-only, so the previous
# positional call raises a TypeError; keywords also work on older versions.
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
print('Estimated tempo: {:.2f} beats per minute'.format(tempo))
# 4. Convert the frame indices of beat events into timestamps
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
beat_times
# https://www.analyticsvidhya.com/blog/2021/06/visualizing-sounds-librosa/
%matplotlib inline
import matplotlib.pylab as plt
import numpy as np
x = np.linspace(-np.pi, np.pi, 201)
plt.plot(x, np.sin(x))
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x)')
plt.axis('tight')
plt.show()
# Short-Tiem Fourier Transform
D = librosa.stft(y)
s = np.abs(librosa.stft(y)**2) # Get magnitude of stft
# Chroma is a 12-element vector that measures energy from the sound pitch.
chroma = librosa.feature.chroma_stft(S=s, sr=sr)
chroma.shape
%matplotlib inline
# increase figure size
plt.rcParams['figure.figsize'] = [12, 8]
plt.rcParams['figure.dpi'] = 100
features = chroma.shape[0]
fig = plt.figure()
gs = fig.add_gridspec(features, hspace=0)
axs = gs.subplots(sharex=True, sharey=True)
for i in range(features):
axs[i].plot(chroma[i].transpose()[:1000])
plt.figure(figsize=(9,12))
plt.show()
chroma_sum = np.cumsum(chroma)
chroma_sum.shape
%matplotlib inline
x = np.linspace(-chroma_sum, chroma_sum)
plt.plot(x, np.sin(x))
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x)')
plt.axis('tight')
plt.show()
from librosa.display import specshow
# enhanced Chroma and Chroma variants
# Constant-Q, type of graph to visualize chroma measurements, uses logartihmically spaced frquency axis
# to display sound in decibels
chroma_orig = librosa.feature.chroma_cqt(y=y, sr=sr)
chroma_orig.shape
# For display purposes, let's zoom in on a 15-second chunk from the middle of the song
start = 0
end = 2
idx = tuple([slice(None), slice(*list(librosa.time_to_frames([start, end])))])
# And for comparison, we'll show the CQT matrix as well.
C = np.abs(librosa.cqt(y=y, sr=sr, bins_per_octave=12*3, n_bins=7*12*3))
fig, ax = plt.subplots(nrows=2, sharex=True)
img1 = specshow(librosa.amplitude_to_db(C, ref=np.max)[idx],
y_axis='cqt_note', x_axis='time', bins_per_octave=12*3,
ax=ax[0])
fig.colorbar(img1, ax=[ax[0]], format="%+2.f dB")
ax[0].label_outer()
img2 = specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[1])
fig.colorbar(img2, ax=[ax[1]])
ax[1].set(ylabel='Default chroma')
```
| github_jupyter |
```
import torch
import sim_data_gen
import numpy as np
import dr_crn
import matplotlib.pyplot as plt
n_feat = 5
def get_mmd(x_train):
    """Return the total MMD imbalance of a dataset across cause groups.

    Splits each row into features (first `n_feat` columns) and cause
    columns (the rest), encodes each row's cause columns to a single group
    id via `sim_data_gen.cause_to_num`, and for every group that occurs
    more than once accumulates |MMD^2_rbf| between that group's features
    and everyone else's.  Smaller totals indicate a better-balanced
    dataset.
    """
    feat = x_train[:, :n_feat]
    causes = x_train[:, n_feat:]
    # one integer id per distinct cause combination
    cause_ind = sim_data_gen.cause_to_num(causes)
    uniques, counts = np.unique(cause_ind, return_counts=True)
    # skip singleton groups: an MMD estimate against one sample is noise
    uniques = uniques[counts > 1]
    mmd_sigma = 1  # RBF kernel bandwidth passed to mmd2_rbf
    mmd = 0
    for i in range(len(uniques)):
        x1 = torch.tensor(feat[cause_ind == uniques[i]])
        x2 = torch.tensor(feat[cause_ind != uniques[i]])
        # abs() guards against small negative MMD^2 estimates
        mmd = mmd + torch.abs(dr_crn.mmd2_rbf(x1, x2, mmd_sigma))
    return mmd
scp_list = []
scp_list_sd = []
for k in [1,2,3,4,5]:
k = k * 2
config_key = 'ea_balance_{}'.format(k)
model_id='SCP'
seed_list = []
for seed in [1, 2, 3, 4, 5]:
x_train = torch.load('model/simulation_overlap/{}_{}_{}_x.pth'.format(config_key, model_id, seed))
x_train = x_train.cpu().numpy()
m = get_mmd(x_train)
seed_list.append(m)
seed_list = np.array(seed_list)
m = seed_list.mean()
sd = seed_list.std()
scp_list.append(m)
scp_list_sd.append(sd)
base_line_list = []
base_line_list_sd = []
for k in [1,2,3,4,5]:
k = k * 2
config_key = 'ea_balance_{}'.format(k)
model_id='IPW'
seed_list = []
for seed in [1, 2, 3, 4, 5]:
x_train = torch.load('model/simulation_overlap/{}_{}_{}_x.pth'.format(config_key, model_id, seed))
x_train = x_train.cpu().numpy()
causes = x_train[:, n_feat:]
m = get_mmd(x_train)
seed_list.append(m)
seed_list = np.array(seed_list)
m = seed_list.mean()
sd = seed_list.std()
base_line_list.append(m)
base_line_list_sd.append(sd)
baseline = np.array(base_line_list)
scp = np.array(scp_list)
baseline_sd = np.array(base_line_list_sd)
scp_sd = np.array(scp_list_sd)
plt.style.use('tableau-colorblind10')
plt.rcParams['font.size'] = '13'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.figure(figsize=(5,3))
width = 0.4
plt.bar(np.arange(1,6)-0.2, baseline,yerr=base_line_list_sd, color=colors[0], width=width, alpha=0.7, label='Observational')
plt.bar(np.arange(1,6)+0.2, scp,yerr=scp_list_sd, color=colors[1], width=width, alpha=0.7, label = 'SCP Augmented')
plt.xlabel(r'Confounding level $|v_m|$', fontsize=14)
plt.ylabel('Distance: $b$', fontsize=16)
plt.legend()
plt.title(r'Balancing of the dataset (smaller better)', fontsize=14)
plt.tight_layout(pad=0.2)
plt.savefig(fname='Fig5_A.png', dpi=300)
import pandas as pds
from scipy.special import comb
plt.style.use('tableau-colorblind10')
plt.rcParams['font.size'] = '13'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
df_base = pds.read_csv('results/results_ea_baseline.txt', sep=' ', header=None)
weights = np.array([comb(5, i) for i in range(1, 6)])
x_ref = np.sum(np.arange(1,6) * weights) / np.sum(weights)
y_ref = np.interp(x_ref, np.arange(1, 6), df_base[2].values)
x_ref_scp = 1 + 0.1 * (np.sum(np.arange(5))) / 5
x_ref_scp
y_ref_scp = np.interp(x_ref_scp, np.arange(1, 6), df_base[2].values)
prefix=''
dat = pds.read_csv('results{}/results_ea.txt'.format(prefix), sep=' ', header=None)
dat[4] = dat[4] / np.sqrt(32)
dat[5] = dat[5] / np.sqrt(32)
dat = dat.sort_values(1)
dat.tail(10)
dat1 = dat[dat[0] == 'SCP']
dat2 = dat[dat[0] == 'FB']
z_ref_scp = np.interp(y_ref_scp, np.arange(7) / 10, dat1[4].values)
plt.figure(figsize=(5,3))
plt.fill_between(dat1[1], dat1[4] - 2 * dat1[5], dat1[4] + 2 * dat1[5], alpha=0.3, color=colors[0])
plt.plot(dat1[1], dat1[4], '-o', color=colors[0], label='SCP')
plt.plot([0, 0.6], [1.533/ np.sqrt(32), 1.533/ np.sqrt(32)], ls='--', c=colors[3], label='No Aug.', linewidth=3)
plt.axvline(y_ref_scp, ymax=0.3, ls='--', c=colors[1], linewidth=3)
plt.title(r'SCP Final Prediction Error (RMSE)', fontsize=14)
plt.xlabel(r'Simulated Step One Error $\xi$', fontsize=14)
plt.ylabel('RMSE', fontsize=14)
plt.text(0.1, 0.275, 'NN Baseline', fontsize=14)
plt.text(0.21, 0.18, 'Actual step one error', fontsize=14, c=colors[1])
plt.tight_layout(pad=0.1)
plt.savefig(fname='Fig5_B.png', dpi=300)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.