text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
# Import required libraries
import pandas as pd
import matplotlib.pyplot as plt
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import string
import nltk
import warnings
%matplotlib inline
# Silence noisy deprecation warnings from the imported libraries.
warnings.filterwarnings("ignore", category=DeprecationWarning)
from nltk.corpus import stopwords
stop = stopwords.words('english')  # requires nltk's 'stopwords' corpus to be downloaded
# Load the tab-separated Goodreads reviews file.
dat = pd.read_csv('goodreads.tsv' , sep='\t')
print(dat.shape)
dat.head()
# Drop rows containing any null value before modelling.
df=dat.dropna()
df
# Map a numeric rating to a sentiment label:
# 1 (positive), 0 (neutral), -1 (negative).
def sentiment(n):
    """Return 1 for ratings >= 4, -1 for ratings <= 2, and 0 otherwise."""
    if n >= 4:
        return 1
    if n <= 2:
        return -1
    return 0
# Label every review with its sentiment class.
df['sent'] = df['rating'].apply(sentiment)
df.head()
# Row counts per sentiment class.
Sentiment_count = df.groupby('sent').count()
Sentiment_count
# Coerce every review body to str in one vectorised step.
# (Bug fix: the original row-by-row `df.iloc[i]['body'] = ...` loop was a
# chained assignment that wrote to a temporary copy — a silent no-op — and
# it also skipped the last row because of `range(0, len(df) - 1)`.)
df['body'] = df['body'].astype(str)
df.shape
x = df['body']
y = df['sent']
from sklearn.model_selection import train_test_split
# Hold out 20% of the reviews for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
import re
def clean_text(text):
    """Normalise one raw review: strip HTML tags, delete backslashes and
    quote characters, lower-case, and turn punctuation into spaces."""
    # Drop HTML tags first, then the characters \, ' and "
    # (these three are deleted outright, not replaced by spaces).
    text = re.sub(r'<.*?>', '', text)
    text = re.sub(r'[\\\'"]', '', text)
    # Trim surrounding whitespace and lower-case before the punctuation pass.
    text = text.strip().lower()
    # Map every punctuation / tab / newline character to a single space.
    punctuation = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
    return text.translate(str.maketrans({ch: " " for ch in punctuation}))
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words counts; clean_text runs as the preprocessor and English
# stop words are removed by the vectorizer itself.
vectorizer = CountVectorizer(stop_words="english",preprocessor=clean_text)
X_train_CV = vectorizer.fit_transform(X_train)
X_test_CV = vectorizer.transform(X_test)  # vocabulary fitted on train only
X_train_CV
from sklearn.feature_extraction.text import TfidfTransformer
# Re-weight the raw counts with TF-IDF.
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_CV)
X_train_tfidf.shape
X_train_tfidf
X_test_tfidf = tfidf_transformer.transform(X_test_CV)
X_test_tfidf
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
# SGD with log loss == logistic regression; trained on the raw counts here.
# NOTE(review): loss="log" was renamed "log_loss" in scikit-learn 1.1 —
# confirm against the installed version.
clf = SGDClassifier(loss="log", penalty="l2") #logistic Regression
clf.fit(X_train_CV, y_train)
clf
y_predic = clf.predict(X_test_CV)
y_predic
from sklearn.metrics import confusion_matrix,accuracy_score,classification_report
clas = classification_report(y_test, y_predic)
clas
con = confusion_matrix(y_test, y_predic)
con
acc = accuracy_score(y_test,y_predic)
acc
print(acc*100)
# Linear SVM via hinge-loss SGD, trained on the TF-IDF features.
clf = SGDClassifier(loss="hinge", penalty="l2") #svm
clf.fit(X_train_tfidf, y_train)
clf
y_pred = clf.predict(X_test_tfidf)
y_pred
ac = accuracy_score(y_test,y_pred)
ac
# Bug fix: this previously printed `acc` (the logistic-regression score from
# the earlier cell) instead of `ac`, the SVM score computed just above.
print(ac*100)
from sklearn.model_selection import GridSearchCV
# Hyper-parameter grid for SGDClassifier.
# NOTE(review): "log" and penalty "none" are renamed in newer scikit-learn
# ("log_loss", penalty=None) — confirm against the installed version.
params = {
    "loss" : ["hinge", "log", "squared_hinge", "modified_huber", "perceptron"],
    "alpha" : [0.0001, 0.001, 0.01, 0.1],
    "penalty" : ["l2", "l1", "elasticnet", "none"],
}
clf = SGDClassifier(max_iter=100)
grid = GridSearchCV(clf, param_grid=params, cv=10)  # 10-fold CV over the grid
grid.fit(X_train_tfidf, y_train)
print(grid.best_params_)
grid_predictions = grid.predict(X_test_tfidf)
grid_predictions
# Test accuracy of the best estimator...
print('Accuracy: {:.2f}'.format(accuracy_score(y_test, grid_predictions)*100))
grid_train_predictions = grid.predict(X_train_tfidf)
# ...and train accuracy, to gauge overfitting.
print('Accuracy: {:.2f}'.format(accuracy_score(y_train, grid_train_predictions)*100))
```
| github_jupyter |
```
# header files needed
import numpy as np
import torch
import torch.nn as nn
import torchvision
from google.colab import drive
drive.mount('/content/drive')
# Fix all RNG seeds for reproducibility.
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
# define transforms: augment (rotate, flip), resize to 224x224, and
# normalise with the ImageNet mean/std used by the pretrained VGG16.
train_transforms = torchvision.transforms.Compose([torchvision.transforms.RandomRotation(30),
                                                   torchvision.transforms.Resize((224, 224)),
                                                   torchvision.transforms.RandomHorizontalFlip(),
                                                   torchvision.transforms.ToTensor(),
                                                   torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# get data
# NOTE(review): the validation set reuses the *augmented* train transforms,
# so validation metrics include random rotation/flip — confirm intended.
train_data = torchvision.datasets.ImageFolder("/content/drive/My Drive/train_images/", transform=train_transforms)
val_data = torchvision.datasets.ImageFolder("/content/drive/My Drive/val_images/", transform=train_transforms)
print(len(train_data))
print(len(val_data))
# data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=32, shuffle=True, num_workers=16, pin_memory=True)
val_loader = torch.utils.data.DataLoader(dataset=val_data, batch_size=32, shuffle=True, num_workers=16, pin_memory=True)
# model class
# model class
class VGG16_CBAM(torch.nn.Module):
    """VGG16-BN backbone with CBAM-style channel + spatial attention applied
    after every conv block.

    Assumes 224x224 inputs: the channel-attention pool kernels
    (224/112/56/28/14) are hard-wired to the feature-map size at each stage,
    so other input sizes will break them — TODO confirm.
    """
    # init function
    def __init__(self, model, num_classes=2):
        # `model` is a pretrained torchvision VGG16-BN whose conv layers are
        # reused below. NOTE(review): `num_classes` is never used — the final
        # Linear layer is hard-coded to 2 outputs; confirm this is intended.
        super().__init__()
        # pool layer — replaces the pretrained model's MaxPool layers so
        # attention can be applied before each downsampling step
        self.pool = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # spatial attention: 7x7 conv over the 2-channel [max, mean] map,
        # batch-normed, squashed to a (B, 1, H, W) sigmoid gate
        self.spatial_attention = torch.nn.Sequential(
            torch.nn.Conv2d(2, 1, kernel_size=7, padding=3, stride=1),
            torch.nn.BatchNorm2d(1),
            torch.nn.Sigmoid()
        )
        # channel attention: global max/avg pools, one pair per stage
        # resolution (224, 112, 56, 28, 14) -> (B, C, 1, 1) descriptors.
        # NOTE(review): unlike canonical CBAM there is no shared MLP here —
        # the gate is simply sigmoid(maxpool + avgpool); confirm intended.
        self.max_pool_1 = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=224, stride=224))
        self.max_pool_2 = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=112, stride=112))
        self.max_pool_3 = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=56, stride=56))
        self.max_pool_4 = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=28, stride=28))
        self.max_pool_5 = torch.nn.Sequential(torch.nn.MaxPool2d(kernel_size=14, stride=14))
        self.avg_pool_1 = torch.nn.Sequential(torch.nn.AvgPool2d(kernel_size=224, stride=224))
        self.avg_pool_2 = torch.nn.Sequential(torch.nn.AvgPool2d(kernel_size=112, stride=112))
        self.avg_pool_3 = torch.nn.Sequential(torch.nn.AvgPool2d(kernel_size=56, stride=56))
        self.avg_pool_4 = torch.nn.Sequential(torch.nn.AvgPool2d(kernel_size=28, stride=28))
        self.avg_pool_5 = torch.nn.Sequential(torch.nn.AvgPool2d(kernel_size=14, stride=14))
        # features: conv blocks sliced out of the pretrained model. The
        # skipped indices (6, 13, 23, 33) are its MaxPool layers, which are
        # replaced by self.pool in forward().
        self.features_1 = torch.nn.Sequential(*list(model.features.children())[:3])
        self.features_2 = torch.nn.Sequential(*list(model.features.children())[3:6])
        self.features_3 = torch.nn.Sequential(*list(model.features.children())[7:10])
        self.features_4 = torch.nn.Sequential(*list(model.features.children())[10:13])
        self.features_5 = torch.nn.Sequential(*list(model.features.children())[14:17])
        self.features_6 = torch.nn.Sequential(*list(model.features.children())[17:20])
        self.features_7 = torch.nn.Sequential(*list(model.features.children())[20:23])
        self.features_8 = torch.nn.Sequential(*list(model.features.children())[24:27])
        self.features_9 = torch.nn.Sequential(*list(model.features.children())[27:30])
        self.features_10 = torch.nn.Sequential(*list(model.features.children())[30:33])
        self.features_11 = torch.nn.Sequential(*list(model.features.children())[34:37])
        self.features_12 = torch.nn.Sequential(*list(model.features.children())[37:40])
        self.features_13 = torch.nn.Sequential(*list(model.features.children())[40:43])
        self.avgpool = nn.AdaptiveAvgPool2d(7)  # -> 512 x 7 x 7 = 25088 features
        # classifier: standard VGG head ending in a 2-way output
        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(25088, 4096),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(),
            torch.nn.Linear(4096, 4096),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(),
            torch.nn.Linear(4096, 2)
        )
    # forward
    def forward(self, x):
        # Every conv block below repeats the same attention pattern:
        #   1) conv block
        #   2) channel gate:  x *= sigmoid(maxpool(x) + avgpool(x))
        #   3) spatial gate:  x *= spatial_attention([max_c(x), mean_c(x)])
        # NOTE(review): torch.nn.functional.sigmoid is deprecated in favour
        # of torch.sigmoid — confirm the installed torch version accepts it.
        # ---- stage 1: 224x224 feature maps ----
        x = self.features_1(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_1(x) + self.avg_pool_1(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_2(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_1(x) + self.avg_pool_1(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.pool(x)
        # ---- stage 2: 112x112 feature maps ----
        x = self.features_3(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_2(x) + self.avg_pool_2(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_4(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_2(x) + self.avg_pool_2(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.pool(x)
        # ---- stage 3: 56x56 feature maps ----
        x = self.features_5(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_3(x) + self.avg_pool_3(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_6(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_3(x) + self.avg_pool_3(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_7(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_3(x) + self.avg_pool_3(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.pool(x)
        # ---- stage 4: 28x28 feature maps ----
        x = self.features_8(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_4(x) + self.avg_pool_4(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_9(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_4(x) + self.avg_pool_4(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_10(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_4(x) + self.avg_pool_4(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.pool(x)
        # ---- stage 5: 14x14 feature maps ----
        x = self.features_11(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_5(x) + self.avg_pool_5(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_12(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_5(x) + self.avg_pool_5(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.features_13(x)
        scale = torch.nn.functional.sigmoid(self.max_pool_5(x) + self.avg_pool_5(x)).expand_as(x)
        x = x * scale
        scale = torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
        scale = self.spatial_attention(scale)
        x = x * scale
        x = self.pool(x)
        # head: adaptive-pool to 7x7, flatten, classify
        x = self.avgpool(x)
        x = x.view(x.shape[0], -1)  # flatten to (batch, 25088)
        x = self.classifier(x)
        return x
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Pretrained VGG16 with batch norm supplies the conv features.
pretrained_model = torchvision.models.vgg16_bn(pretrained=True)
model = VGG16_CBAM(pretrained_model, 2)
model.to(device)
print(model)
# loss
criterion = torch.nn.CrossEntropyLoss()
# optimizer to be used
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)
# Per-epoch history and best-checkpoint bookkeeping.
train_losses = []
train_acc = []
val_losses = []
val_acc = []
best_metric = -1
best_metric_epoch = -1
# train and validate
for epoch in range(0, 30):
    # train
    model.train()
    training_loss = 0.0
    total = 0
    correct = 0
    for i, (input, target) in enumerate(train_loader):  # note: `input` shadows the builtin
        input = input.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        training_loss = training_loss + loss.item()
        # accuracy bookkeeping: predicted class = argmax over the 2 logits
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
    training_loss = training_loss / float(len(train_loader))  # mean per-batch loss
    training_accuracy = str(100.0 * (float(correct) / float(total)))  # stored as a string
    train_losses.append(training_loss)
    train_acc.append(training_accuracy)
    # validate (gradients disabled inside the loop body)
    model.eval()
    valid_loss = 0.0
    total = 0
    correct = 0
    for i, (input, target) in enumerate(val_loader):
        with torch.no_grad():
            input = input.to(device)
            target = target.to(device)
            output = model(input)
            loss = criterion(output, target)
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            valid_loss = valid_loss + loss.item()
    valid_loss = valid_loss / float(len(val_loader))
    valid_accuracy = str(100.0 * (float(correct) / float(total)))
    val_losses.append(valid_loss)
    val_acc.append(valid_accuracy)
    # store best model — checkpointing deliberately starts at epoch 10
    if(float(valid_accuracy) > best_metric and epoch >= 10):
        best_metric = float(valid_accuracy)
        best_metric_epoch = epoch
        torch.save(model.state_dict(), "best_model.pth")
    print()
    print("Epoch" + str(epoch) + ":")
    print("Training Accuracy: " + str(training_accuracy) + " Validation Accuracy: " + str(valid_accuracy))
    print("Training Loss: " + str(training_loss) + " Validation Loss: " + str(valid_loss))
    print()
# Plot the training-loss curve over the 30 epochs.
import matplotlib.pyplot as plt
e = []
for index in range(0, 30):
    e.append(index)
plt.plot(e, train_losses)
plt.show()
```
| github_jupyter |
WARNING: This notebook may take a while to run all the cells.
```
import requests
# Download the 10% subset of the KDD Cup 1999 network-intrusion dataset.
url = 'http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz'
r = requests.get(url, allow_redirects=True)
# Use a context manager so the file handle is flushed and closed
# deterministically (the original left the handle open).
with open('kddcup.data_10_percent.gz', 'wb') as f:
    f.write(r.content)
```
Downloading the dataset. It will download the dataset in .gz format.
```
!gunzip /content/kddcup.data_10_percent.gz
```
Extracting the dataset.
```
!mv /content/kddcup.data_10_percent /content/kddcup.data_10_percent.csv
```
Changing the extension of the file to .csv
```
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer
import warnings
import seaborn as sns
warnings.filterwarnings("ignore")
```
Importing necessary libraries
```
# Column names for the 41 KDD'99 features plus the attack label
# (the raw file has no header row).
names = ["duration", "protocol_type", "service", "flag", "src_bytes",
    "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
    "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
    "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
    "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
    "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
    "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
    "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
    "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
    "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
data = pd.read_csv("kddcup.data_10_percent.csv", names=names)
```
Loading the dataset
```
data.head()
# Show floats with 3 decimal places. Use the fully-qualified option name:
# the bare 'precision' alias was removed in pandas 2.0.
pd.set_option('display.precision', 3)
# Numeric-only view: drop the categorical columns and the label.
# (drop returns a new frame each time, so `data` itself is unchanged.)
data1 = data
data1 = data1.drop('protocol_type', axis=1)
data1 = data1.drop('service', axis=1)
data1 = data1.drop('flag', axis=1)
data1 = data1.drop('label', axis=1)
```
Making a copy of the dataset with all numerical values.
Lets check the correlation between the features in the dataset.
```
# Pairwise correlation of the numeric features, shown as a heatmap.
cor = data1.corr()
plt.figure(figsize=(15,10))
sns.heatmap(cor, annot=False)
plt.show()
```
Now I'll select the highly correlated features and drop them.
```
x = list(cor.columns)
g = []
# Collect every pair of columns whose correlation is >= 0.7
# (lower triangle only, so each pair appears once).
for i in x:
    for j in x[:x.index(i)]:
        if cor[i][j] >= 0.7:
            g.append((i, j))
# Merge the pairs into groups of mutually correlated columns; `c` counts
# whether either member of the pair is already in an existing group.
o, c = [], 0
for i in g:
    for v in o:
        if i[0] in [x for x in v] or i[1] in [x for x in v]:
            c += 1
    if c == 0:
        s = {i[0], i[1]}
        for j in g[g.index(i) + 1:]:
            if i[0] == j[0] or i[0] == j[1] or i[1] == j[0] or i[1] == j[1]:
                s.add(j[0])
                s.add(j[1])
        o.append(s)
    c = 0
b = []
listd = set(b)
# Keep one column per group, mark the rest for removal.
# NOTE(review): list(i) on a set has arbitrary order, so *which* column of
# each group survives is not deterministic across runs — confirm intended.
for i in o:
    for j in list(i)[:-1]:
        listd.add(j)  # listd is the set of correlated columns
for i in listd:
    data = data.drop(i, axis=1)  # dropped the correlated columns
# Converting Categorical data to numerical
dataX = data.values[:, :data.shape[1] - 1]
dataY = data.values[:, data.shape[1] - 1]
dataY1 = dataY
# Columns 1-3 are protocol_type / service / flag (categorical strings).
dataX[:, 1] = LabelEncoder().fit_transform(dataX[:, 1])
dataX[:, 2] = LabelEncoder().fit_transform(dataX[:, 2])
dataX[:, 3] = LabelEncoder().fit_transform(dataX[:, 3])
dataY = LabelEncoder().fit_transform(dataY)
label = {}
# Storing the label in a dictionary of encoded value -> original label text.
for i in range(len(dataY1)):
    label[dataY[i]] = dataY1[i]
from sklearn.model_selection import cross_val_score
```
Creating a function ***acuu*** that trains and evaluates all the classifiers. You can add more classifiers to it.<br>
This function splits the data in training and validation set.<br>
Also it Normalizes the dataset.
```
def acuu(x, y):
    """Train five classifiers on (x, y) and report their accuracies.

    Splits the data 67/33 into train/validation, L2-normalises each sample,
    then fits LinearSVC, Perceptron, k-NN, SGD and XGBoost.  For each model
    it prints the hold-out accuracy and the mean 10-fold cross-validation
    accuracy (computed on the validation split), and finally plots both sets
    of scores as bar charts.
    """
    l = []  # hold-out accuracies, in model order
    k = []  # mean 10-fold CV accuracies, in model order
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.33, random_state=7)
    # Normalizer is stateless, so the repeated fit/fit_transform is harmless.
    fit = Normalizer().fit(x_train)
    x_train = fit.fit_transform(x_train)
    x_val = fit.transform(x_val)
    # Bug fix: the "by k-fold" print lines below previously all said
    # "Support Vector Machines" (copy-paste); each now names its own model.
    # MODEL-1) Linear SVC
    # ------------------------------------------
    from sklearn.svm import LinearSVC
    linear_svc = LinearSVC()
    linear_svc.fit(x_train, y_train)
    y_pred = linear_svc.predict(x_val)
    acc_linear_svc = round(accuracy_score(y_pred, y_val) * 100, 2)
    print("MODEL-1: Accuracy of LinearSVC : ", acc_linear_svc)
    l.append(acc_linear_svc)
    acc_linear_svck = cross_val_score(linear_svc, x_val, y_val, cv=10, scoring='accuracy')
    print("MODEL-1: Accuracy of LinearSVC by k-fold : ", round(acc_linear_svck.mean() * 100, 2))
    k.append(round(acc_linear_svck.mean() * 100, 2))
    # MODEL-2) Perceptron
    # ------------------------------------------
    from sklearn.linear_model import Perceptron
    perceptron = Perceptron()
    perceptron.fit(x_train, y_train)
    y_pred = perceptron.predict(x_val)
    acc_perceptron = round(accuracy_score(y_pred, y_val) * 100, 2)
    print("MODEL-2: Accuracy of Perceptron : ", acc_perceptron)
    l.append(acc_perceptron)
    acc_perceptronk = cross_val_score(perceptron, x_val, y_val, cv=10, scoring='accuracy')
    print("MODEL-2: Accuracy of Perceptron by k-fold : ", round(acc_perceptronk.mean() * 100, 2))
    k.append(round(acc_perceptronk.mean() * 100, 2))
    # MODEL-3) KNN or k-Nearest Neighbors
    # ------------------------------------------
    from sklearn.neighbors import KNeighborsClassifier
    knn = KNeighborsClassifier()
    knn.fit(x_train, y_train)
    y_pred = knn.predict(x_val)
    acc_knn = round(accuracy_score(y_pred, y_val) * 100, 2)
    print("MODEL-3: Accuracy of k-Nearest Neighbors : ", acc_knn)
    l.append(acc_knn)
    acc_knnk = cross_val_score(knn, x_val, y_val, cv=10, scoring='accuracy')
    print("MODEL-3: Accuracy of k-Nearest Neighbors by k-fold : ", round(acc_knnk.mean() * 100, 2))
    k.append(round(acc_knnk.mean() * 100, 2))
    # MODEL-4) Stochastic Gradient Descent
    # ------------------------------------------
    from sklearn.linear_model import SGDClassifier
    sgd = SGDClassifier()
    sgd.fit(x_train, y_train)
    y_pred = sgd.predict(x_val)
    acc_sgd = round(accuracy_score(y_pred, y_val) * 100, 2)
    print("MODEL-4: Accuracy of Stochastic Gradient Descent : ", acc_sgd)
    l.append(acc_sgd)
    acc_sgdk = cross_val_score(sgd, x_val, y_val, cv=10, scoring='accuracy')
    print("MODEL-4: Accuracy of Stochastic Gradient Descent by k-fold : ", round(acc_sgdk.mean() * 100, 2))
    k.append(round(acc_sgdk.mean() * 100, 2))
    # MODEL-5) XGBoost
    from xgboost import XGBClassifier
    classifier = XGBClassifier()
    classifier.fit(x_train, y_train)
    y_pred = classifier.predict(x_val)
    acc_xgb = round(accuracy_score(y_pred, y_val) * 100, 2)
    print("MODEL-5: Accuracy of XGBoost : ", acc_xgb)
    l.append(acc_xgb)
    acc_xgbk = cross_val_score(classifier, x_val, y_val, cv=10, scoring='accuracy')
    print("MODEL-5: Accuracy of XGBoost by k-fold : ", round(acc_xgbk.mean() * 100, 2))
    k.append(round(acc_xgbk.mean() * 100, 2))
    # Bar charts: hold-out accuracies, then k-fold accuracies.
    classifiers = ['LinearSVC', 'Perceptron', 'KNeighborsClassifier', 'SGDClassifier', 'XGBoost']
    plt.bar(classifiers, l, color=['b', 'm', 'g', 'y', 'maroon', 'cyan'])
    plt.xticks(classifiers, rotation='vertical')
    plt.xlabel('Classifiers')
    plt.ylabel('Accuracies')
    plt.title("Accuracy_Score()")
    plt.show()
    l = []
    plt.bar(classifiers, k, color=['b', 'm', 'g', 'y', 'maroon', 'cyan'])
    plt.xticks(classifiers, rotation='vertical')
    plt.xlabel('Classifiers')
    plt.ylabel('Accuracies')
    plt.title("KFolds10")
    plt.show()
    k = []
acuu(dataX,dataY)
```
Using following Feature Selection algorithms to select 17 best features and then fitting them to all classifiers.
1. Univariate Feature Selection
2. Recursive Feature Elimination (RFE)
3. Principal Component Analysis (PCA)
4. Linear Discriminant Analysis (LDA)
```
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Univariate selection: keep the 17 features with the best chi2 scores.
test = SelectKBest(score_func=chi2, k=17)  # k is number of features
fit = test.fit(dataX, dataY)
train2 = test.transform(dataX)
acuu(train2, dataY)
#
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
model = LogisticRegression()
# Pass the feature count by keyword: the positional form was deprecated in
# scikit-learn 0.24 and removed in 1.2.
rfe = RFE(model, n_features_to_select=17)
fit = rfe.fit(dataX, dataY)
train2 = fit.transform(dataX)
acuu(train2, dataY)
from sklearn.decomposition import PCA
# PCA down to 17 components.
pca = PCA(17)
fit = pca.fit(dataX, dataY)
train2 = pca.transform(dataX)
acuu(train2, dataY)
import warnings
warnings.filterwarnings("ignore")
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# NOTE(review): LDA yields at most n_classes - 1 components, so 17 only
# works if the label column has at least 18 classes — confirm.
lda = LDA(n_components=17)
fit = lda.fit_transform(dataX, dataY)
train3 = lda.transform(dataX)
# fit = lda.fit(X=dataX, y=dataY)
# train2 = fit.fit_transform(dataX)
print(train3.shape)
acuu(train3, dataY)
```
Using following Feature Selection algorithms to select 11 best features and then fitting them to all classifiers.
1. Univariate Feature Selection
2. Recursive Feature Elimination (RFE)
3. Principal Component Analysis (PCA)
4. Linear Discriminant Analysis (LDA)
```
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Univariate selection: keep the 11 features with the best chi2 scores.
test = SelectKBest(score_func=chi2, k=11)  # k is number of features
fit = test.fit(dataX, dataY)
train2 = test.transform(dataX)
acuu(train2, dataY)
#
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
model = LogisticRegression()
# Pass the feature count by keyword: the positional form was deprecated in
# scikit-learn 0.24 and removed in 1.2.
rfe = RFE(model, n_features_to_select=11)
fit = rfe.fit(dataX, dataY)
train2 = fit.transform(dataX)
acuu(train2, dataY)
from sklearn.decomposition import PCA
# PCA down to 11 components.
pca = PCA(11)
fit = pca.fit(dataX, dataY)
train2 = pca.transform(dataX)
acuu(train2, dataY)
import warnings
warnings.filterwarnings("ignore")
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# LDA down to 11 discriminant components.
lda = LDA(n_components=11)
fit = lda.fit_transform(dataX, dataY)
train3 = lda.transform(dataX)
# fit = lda.fit(X=dataX, y=dataY)
# train2 = fit.fit_transform(dataX)
print(train3.shape)
acuu(train3, dataY)
```
Using all Feature Selection Algorithm in a sequence to get best 11 features.<br>
The Sequence is:<br>
Univariate --> RFE --> PCA --> LDA
```
# Chain the selectors: chi2 top-23 -> RFE to 20 -> PCA to 16 -> LDA to 11.
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
test = SelectKBest(score_func=chi2, k=23)  # k is number of features
fit = test.fit(dataX, dataY)
train2 = test.transform(dataX)
# acuu(train2, dataY)
#
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
model = LogisticRegression()
# Pass the feature count by keyword: the positional form was deprecated in
# scikit-learn 0.24 and removed in 1.2.
rfe = RFE(model, n_features_to_select=20)
fit = rfe.fit(train2, dataY)
train3 = fit.transform(train2)
# acuu(train2, dataY)
from sklearn.decomposition import PCA
pca = PCA(16)
fit = pca.fit(train3, dataY)
train4 = pca.transform(train3)
# acuu(train2, dataY)
import warnings
warnings.filterwarnings("ignore")
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=11)
fit = lda.fit_transform(train4, dataY)
train5 = lda.transform(train4)
# fit = lda.fit(X=dataX, y=dataY)
# train2 = fit.fit_transform(dataX)
print(train5.shape)
acuu(train5, dataY)
```
Using RFE and LDA in a sequence to get best 11 features.<br>
RFE --> LDA
```
# RFE to 17 features, then LDA down to 11 components.
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
model = LogisticRegression()
# Pass the feature count by keyword: the positional form was deprecated in
# scikit-learn 0.24 and removed in 1.2.
rfe = RFE(model, n_features_to_select=17)
fit = rfe.fit(dataX, dataY)
train2 = fit.transform(dataX)
print("RFE")
import warnings
warnings.filterwarnings("ignore")
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=11)
fit = lda.fit_transform(train2, dataY)
train3 = lda.transform(train2)
# fit = lda.fit(X=dataX, y=dataY)
# train2 = fit.fit_transform(dataX)
print(train3.shape)
acuu(train3, dataY)
```
| github_jupyter |
# Birthquake
We'd like to know what earthquakes occurred on our birthday.
**Proxy trouble? See below.**
Let's make some strings!
```
# ISO-format dates bounding the day of interest (start, exclusive end).
birthday = '1971-05-26'
birthdayafter = '1971-05-27'
```
Now we can build a URL by concatenating the strings:
```
# Base endpoint of the USGS earthquake catalogue API.
url = 'https://earthquake.usgs.gov/fdsnws/event/1/query'
query_url = url + "?format=csv&starttime=" + birthday + "&endtime=" + birthdayafter
```
Or even do it with an f-string, which is a tiny bit shorter:
```
# Same URL built with an f-string; pandas reads the CSV straight from it.
query_url = f"{url}?format=csv&starttime={birthday}&endtime={birthdayafter}"
import pandas as pd
df = pd.read_csv(query_url)
df
```
## Using a `dict`
```
# Build the query string from a dict instead of concatenating by hand.
query = {
    'format': 'csv',
    'starttime': birthday,
    'endtime': birthdayafter,
}
from urllib.parse import urlencode
pd.read_csv(f"{url}?{urlencode(query)}")
```
## Using `requests`
Pandas is reading the resource using a CSV reader. We can use another library, `requests` to inspect the resource as raw text:
```
import requests
query = {
    'format': 'text', # <-- try some other formats
    'starttime': birthday,
    'endtime': birthdayafter,
}
# requests URL-encodes the params dict itself; print the raw response body.
r = requests.get(url, query)
print(r.text)
```
## Make a map
```
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()  # background raster map
# Plot a single event (the row labelled 4) as a red dot.
plt.plot(df.loc[4, 'longitude'], df.loc[4, 'latitude'], 'o', color='red')
plt.show()
```
## A nicer map
```
lat, lon = df.latitude, df.longitude
plt.figure(figsize=(10, 5))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()
# All events as red dots, each labelled with its place name.
plt.plot(lon, lat, 'o', color='red')
# Use positional indexing (.iloc): enumerate yields positions, and
# label-based lon[i]/lat[i] only works by coincidence when the DataFrame
# still has its default RangeIndex.
for i, name in enumerate(df.place):
    ax.annotate(name, (lon.iloc[i], lat.iloc[i]))
plt.show()
```
## More elegant
We can use `datetime` to make it a bit more convenient, allowing us to easily add a day, and specify a timezone:
```
import pandas as pd
from datetime import datetime, timedelta
from urllib.parse import urlencode
def birthquake(birthday: str, tz: str = '+0000') -> pd.DataFrame:
    """Fetch the earthquakes recorded on one calendar day as a DataFrame.

    `tz` is an optional UTC offset and must carry a sign and 4 digits,
    e.g. birthquake("1971-05-26", tz="+0100").
    """
    start = datetime.strptime(f"{birthday} {tz}", '%Y-%m-%d %z')
    url = 'https://earthquake.usgs.gov/fdsnws/event/1/query'
    params = {
        'format': 'csv',
        'starttime': start,
        'endtime': start + timedelta(days=1),  # one full day later
    }
    # urlencode stringifies the datetime objects for us.
    return pd.read_csv(f"{url}?{urlencode(params)}")
birthquake('1971-05-26', '-0000')
```
----
## A note about proxies
If you are in a corporate environment, you probably connect to the Internet through another computer called a 'proxy'. You will need the URL of this proxy; it might look like `https://proxy.my-company.net:8080`. Then use it in your Python environment like this:
proxies = {'https': 'https://proxy.my-company.net:8080'}
r = requests.get(url, proxies=proxies)
Each time you use `requests.get()` you will need to pass the `proxies` dictionary in this way.
| github_jupyter |
SOP054 - Install azdata CLI (using pip)
=======================================
Steps
-----
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True  # flipped to False after run() performs its one-time setup
rules = None  # populated lazily by load_rules() on the first run() call
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
    """Run shell command, stream stdout, print stderr and optionally return output

    Args:
        cmd: command line to execute (split per-platform with shlex.split).
        return_output: when True, capture stdout and return it instead of printing it.
        no_output: when True, do not pipe stdout/stderr at all (avoids Jupyter
            hangs on commands that emit scrolling progress bars).
        retry_count: current recursion depth of the transient-fault retry logic.
        base64_decode: when True and output is returned, base64-decode it first.
        return_as_json: accepted for interface compatibility; not read in this body.

    NOTES:
    1. Commands that need this kind of ' quoting on Windows e.g.:
            kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
       Need to actually pass in as '"':
            kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
       The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
            `iter(p.stdout.readline, b'')`
       The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
    """
    MAX_RETRIES = 5
    output = ""
    retry = False  # NOTE(review): assigned but never read in this body

    global first_run
    global rules

    # Load the 'expert rules' from notebook metadata once per kernel session.
    if first_run:
        first_run = False
        rules = load_rules()

    # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
    #
    # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
    #
    if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
        cmd = cmd.replace("\n", " ")

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
    #
    if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
        cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")

    # To aid supportability, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
    # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    # Display an install HINT, so the user can click on a SOP to install the missing binary
    #
    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)

    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            # No pipes at all: output goes straight to the terminal.
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)
        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            try:
                line_decoded = line.decode()
            except UnicodeDecodeError:
                # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
                #
                # \xa0
                #
                # For example see this in the response from `az group create`:
                #
                # ERROR: Get Token request returned http error: 400 and server
                # response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
                # The refresh token has expired due to inactivity.\xa0The token was
                # issued on 2018-10-25T23:35:11.9832872Z
                #
                # which generates the exception:
                #
                # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
                #
                print("WARNING: Unable to decode stderr line, printing raw bytes:")
                print(line)
                line_decoded = ""
                pass
            else:
                # azdata emits a single empty line to stderr when doing an hdfs cp, don't
                # print this empty "ERR:" as it confuses.
                #
                if line_decoded == "":
                    continue

                print(f"STDERR: {line_decoded}", end='')

                if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                    exit_code_workaround = 1

                # inject HINTs to next TSG/SOP based on output in stderr
                #
                if user_provided_exe_name in error_hints:
                    for error_hint in error_hints[user_provided_exe_name]:
                        if line_decoded.find(error_hint[0]) != -1:
                            display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))

                # apply expert rules (to run follow-on notebooks), based on output
                #
                if rules is not None:
                    apply_expert_rules(line_decoded)

                # Verify if a transient error, if so automatically retry (recursive)
                #
                if user_provided_exe_name in retry_hints:
                    for retry_hint in retry_hints[user_provided_exe_name]:
                        if line_decoded.find(retry_hint) != -1:
                            if retry_count < MAX_RETRIES:
                                print(f"RETRY: {retry_count} (due to: {retry_hint})")
                                retry_count = retry_count + 1
                                output = run(cmd, return_output=return_output, retry_count=retry_count)

                                if return_output:
                                    if base64_decode:
                                        import base64
                                        return base64.b64decode(output).decode('utf-8')
                                    else:
                                        return output

    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
        else:
            if exit_code_workaround !=0 :
                raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        if base64_decode:
            import base64
            return base64.b64decode(output).decode('utf-8')
        else:
            return output
def load_json(filename):
    """Read *filename* from disk and return its parsed JSON content."""
    with open(filename, encoding="utf8") as fh:
        contents = json.load(fh)
    return contents
def load_rules():
    """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable

    Returns:
        The sorted list of rules, or None when the notebook file cannot be
        read (e.g. it was renamed) or carries no expanded rules in its metadata.
    """
    # BUGFIX: initialize so the failure path returns None instead of raising
    # UnboundLocalError at the final `return rules` (rules was only assigned
    # in the `else` branch, making it local and unbound on the except path).
    rules = None

    # Load this notebook as json to get access to the expert rules in the notebook metadata.
    #
    try:
        j = load_json("sop054-install-azdata.ipynb")
    except:
        pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "expanded_rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
            rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
            # print (f"EXPERT: There are {len(rules)} rules to evaluate.")
    return rules
def apply_expert_rules(line):
    """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
    inject a 'HINT' to the follow-on SOP/TSG to run"""
    global rules

    # Robustness: load_rules() returns None on failure; nothing to match then.
    if rules is None:
        return

    for rule in rules:
        notebook = rule[1]
        cell_type = rule[2]
        output_type = rule[3] # i.e. stream or error
        output_type_name = rule[4] # i.e. ename or name
        output_type_value = rule[5] # i.e. SystemExit or stdout
        details_name = rule[6] # i.e. evalue or text
        expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!

        if debug_logging:
            print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")

        if re.match(expression, line, re.DOTALL):
            if debug_logging:
                # BUGFIX: the format string previously referenced {4} with only four
                # arguments (indices 0-3), raising IndexError whenever a rule matched
                # while debug logging was enabled.
                print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{3}'".format(output_type_name, output_type_value, expression, notebook))

            display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'python': [], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)']}
error_hints = {'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb']], 'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. 
Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb']]}
install_hint = {'python': [], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
```
### Install azdata CLI
```
# Show which Python interpreter `run` resolves (it substitutes sys.executable),
# then install the azdata CLI from the Microsoft-hosted requirements file.
run(f'python --version')
run(f'python -m pip install -r https://aka.ms/azdata')
```
### Display azdata version
```
run("azdata --version")
```
### Related (SOP063, SOP054)
```
print('Notebook execution complete.')
```
| github_jupyter |
# 03. DQN example with CartPole
## Colab 용 package 설치 코드
```
!pip install gym
!pip install JSAnimation
```
### package import
```
# The typical imports
from IPython.display import clear_output
import gym
import numpy as np
import matplotlib.pyplot as plt
import random
%matplotlib inline
import tensorflow as tf
np.random.seed(777)
tf.set_random_seed(777)
random.seed(777)
print("tensorflow version: ", tf.__version__)
print("gym version: ", gym.__version__)
```
### 게임 화면을 보여주기 위한 함수
```
# Imports specifically so we can render outputs in Jupyter.
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
from IPython.display import display
def display_frames_as_gif(frames):
    """Render a sequence of RGB frames as an animated gif with playback controls."""
    image = plt.imshow(frames[0])
    plt.axis('off')

    def _draw(step):
        # Swap the displayed pixels for the requested frame.
        image.set_data(frames[step])

    movie = animation.FuncAnimation(plt.gcf(), _draw, frames=len(frames), interval=50)
    display(display_animation(movie, default_mode='loop'))
```
### 그래프를 그리기 위한 함수
```
def plot(frame_idx, episode, rewards, losses):
    """Redraw the reward and loss training curves in place (clears prior output)."""
    clear_output(True)
    plt.figure(figsize=(20,5))

    # (subplot position, title, series) for each panel.
    panels = [
        (131, 'episode %s. reward: %s' % (episode, np.mean(rewards[-10:])), rewards),
        (132, 'loss', losses),
    ]
    for position, title, series in panels:
        plt.subplot(position)
        plt.title(title)
        plt.plot(series)

    plt.show()
```
<script src='https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML' async></script>
## CartPole
CartPole is a game in which a pole must be kept balanced on a moving cart.
The game's observation consists of $x$, $x\prime$, $\theta$, $\theta\prime$:
$x$ : 카트의 위치
$\theta$ : 막대의 각도
$x\prime$ : 카트의 속도
$\theta\prime$ : 막대의 각속도
Action is **Left** or **Right**
모든 step 마다 보상을 1 받으며, 아래 3가지 경우에 episode가 끝난다.
1. 카트가 바깥으로 나갈 때
2. 기둥이 너무 많이 기울었을 때
3. 200 step 지났을 때
<img src="./img/cartpole.gif" width="60%" align="left">
```
# CartPole 환경
env_id = "CartPole-v0"
env = gym.make(env_id)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print("Observation size : ", state_size)
print("Action size : ", action_size)
```
## DQN Agent
### Replay Buffer
```
state = env.reset()
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
print("state_size:", np.shape(state))
print("next_state_size:", np.shape(next_state))
# deque buffer 생성
buffer = []
# buffer에 transition((state, action, reward, next_state, done))을 append
transition =
# buffer에 append
print(np.shape(buffer))
print(buffer)
# buffer에서 batch size만큼 sampling
for i in range(5):
buffer.append(transition)
# {} 빈칸을 채우세요.
idxs = np.random.choice({}, size={}, replace=False)
state, action, reward, next_state, done = [], [], [], [], []
for i in {}:
s, a, r, n_s, d = buffer[{}]
state.append(np.array({}, copy=False))
action.append(np.array({}, copy=False))
reward.append(np.array({}, copy=False))
next_state.append(np.array({}, copy=False))
done.append(np.array({}, copy=False))
state = np.array({})
action = np.array({})
reward = np.array({})
next_state = np.array({})
done = np.array({})
print(np.shape(state))
print(next_state)
print(action)
print(reward)
print(done)
```
### Replay Buffer class
```
# Unifrom Replay Buffer
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer =
self.capacity =
self.idx = 0
# buffer 길이 체크
def __len__(self):
# buffer에 sample 추가
def add(self, state, action, reward, next_state, done):
transition = (state, action, reward, next_state, done)
if len(self.buffer) == self.capacity:
# buffer가 꽉차면 0번째부터 다시 채운다
else:
# buffer append
# buffer에서 batch_size만큼 뽑기
def sample(self, batch_size):
# sample code 작성
return state, action, reward, next_state, done
# buffer 검증
b = ReplayBuffer(5)
print(len(b))
s, a, r, n_s, d = transition
for _ in range(5):
b.add(s, a, r, n_s, d)
print("sample", b.sample(2))
# add 검증
a = np.array([3,3])
d = True
for _ in range(5):
b.add(s, a, r, n_s, d)
print(len(b))
print("new sample", b.sample(2))
```
### DQN Agent Class
<img src="./img/hyperparameters.png" width="100%" align="left">
Q Learning에서 Q함수의 업데이트식은 다음과 같다.
$$Q(S,A) \gets Q(S,A) + \alpha [r + \gamma max_{a\prime}Q(S \prime, a \prime) - Q(S,A)]$$
DQN에서는 업데이트식에서 TD error 부분을 Loss로 보고 학습한다.
$$ Loss = E [(y - Q(S,A))^{2}]$$
```
layer = tf.contrib.layers
class DQNAgent:
def __init__(self, sess, state_size, action_size):
self.sess = sess
self.state_size = state_size
self.action_size = action_size
# hyper parameter
self.batch_size = 32
self.gamma = 0.99
self.learning_rate = 0.00025
# epsilon
self.s_epsilon = 1.0
self.e_epsilon = 0.01
self.n_epsilon_decay = 1000
self.epsilon = self.s_epsilon
# place holder
self.input_policy = tf.placeholder(tf.float32, shape=(None, self.state_size))
self.input_target = tf.placeholder(tf.float32, shape=(None, self.state_size))
self.actions = tf.placeholder(tf.int32, shape=None)
self.targets = tf.placeholder(tf.float32, shape=None)
# network
self.policy_q = self._build_network(self.input_policy, net_name="policy_net")
self.target_q = self._build_network(self.input_target, net_name="target_net")
self.sess.run(tf.global_variables_initializer())
self.update_target_network()
# replay buffer
# 직접 작성해보세요.
self.buffer =
# optimizer
self.loss_op, self.train_op = self._build_op()
def _build_network(self, inputs, name):
"""
tf.contrib.layers.fully_connected()를 이용해
hidden layer가 하나인 신경망을 구성합니다.
입력 : 상태 (state_size)
출력 : q-value (action_size)
hidden layer size : 128
activation function : Relu
"""
# 빈칸 {} 을 지우고 채워주세요.
# 참고) layer.fully_connected(입력, 출력 사이즈, activation function)
# 참고2) relu -> tf.nn.relu
with tf.variable_scope(net_name):
fc1 = layer.{}(
inputs={},
num_outputs={},
activation_fn={},
)
fc2 = layer.{}(
inputs={},
num_outputs={},
activation_fn=tf.nn.relu,
)
q_value = layer.{}(
inputs={},
num_outputs={},
activation_fn=None,
)
return q_value
def _build_op(self):
"""신경망 학습을 위한 Loss function과 Optimaizer를 정의합니다."""
# 직접 작성해보세요.
# 참고) # 이전 실습에서 현재 action에 대한 Q_value 구하는 연산
# curr_action = tf.one_hot(input_action, action_size)
# curr_q_value = tf.reduce_sum(tf.multiply(q_value, curr_action))
action_one_hot =
predict_q =
# 참고) 이전 실습에서 Loss 함수 구성
# loss_op = tf.square(target - curr_q_value)
# opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# train_op = opt.minimize(loss_op)
loss_op = tf.reduce_mean(tf.square(self.targets - predict_q))
train_op = tf.train.RMSPropOptimizer(
learning_rate=self.learning_rate,
decay=0.95,
momentum=0.95,
epsilon=0.01
).minimize(loss_op)
return loss_op, train_op
def update_model(self):
"""
replay buffer에서 batch size만큼 가져온 후
학습 네트워크를 학습합니다.
loss function은 위의 수식 참고
"""
# replay buffer로부터 transition을 가져옴
# 직접 작성해보세요.
# 참고) 위에 작성한 replay buffer class, replay_buffer.sample()
states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size)
# target 계산
# 아래 eval코드는 sess.run과 같은 동작을 함.
target_q = self.target_q.eval({self.input_target: next_states}, self.sess)
target_q = # max
targets = # target 계산
# loss 계산 및 학습
loss, _ = self.sess.run(
[{}, {}],
feed_dict={self.{}: {}, # state
self.{}: {}, # action
self.{}: {}}) # target
return loss
def update_target_network(self):
"""
학습 네트웍의 변수의 값들을 타겟 네트웍으로 복사해서 타겟 네트웍의 값들을 최신으로 업데이트합니다.
"""
copy_op = []
main_q_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='policy_net')
target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_net')
for main_q_var, target_var in zip(main_q_vars, target_vars):
copy_op.append(target_var.assign(main_q_var.value()))
self.sess.run(copy_op)
def select_action(self, state):
"""epsilon-greedy로 action을 선택합니다."""
# epsilon greedy policy
if self.epsilon > np.random.random():
# random action
selected_action = np.random.randint(self.action_size)
else:
# policy action 구현
# 매 step마다 epsilon을 줄여나갑니다.
if self.epsilon >= self.e_epsilon:
self.epsilon -= (self.s_epsilon - self.e_epsilon) / self.n_epsilon_decay
return selected_action
```
### DQN agent train
```
# Session 열기
tf.reset_default_graph()
sess = tf.Session()
# DQN Agent 객체 생성
agent = DQNAgent(sess, state_size, action_size)
# 변수 초기화
sess.run(tf.global_variables_initializer())
```
### DQN 학습
```
EPISODE = 30
replay_initial = 50
target_update = 10
total_step = 1
all_episode_reward = []
losses = []
for e in range(EPISODE):
print("EPISODE: {}".format(e+1))
observation = env.reset()
done = False
step = 1
episode_reward = 0
frames = []
while not done:
# 직접 작성해보세요.
# action 선택
action =
# 선택한 action으로 env.step()
next_observation, reward, done, _ = env.step(action)
step += 1
total_step += 1
episode_reward += reward
# trajectory(S, A, R, S', done)를 Replay buffer에 저장
# replay buffer에 저장하는 코드 작성
{}
observation = next_observation
# 만약에 episode가 끝났으면 reward 저장
if done:
all_episode_reward.append(episode_reward)
# replay buffer가 일정 이상 채워지면 학습 시작
if len(agent.buffer) > replay_initial:
# 신경망 업데이트 코드 작성
loss =
losses.append(loss)
# 일정 step마다 target Q 업데이트
if total_step > replay_initial and total_step % target_update == 0:
# policy network를 target network에 복사해주는 코드 작성
# 그래프 그리기
if total_step % 100 == 0:
plot(step, e, all_episode_reward, losses)
print(total_step)
env.close()
```
### 학습된 DQN 테스트
```
EPISODE = 1
all_episode_reward = []
losses = []
for e in range(EPISODE):
print("EPISODE: {}".format(e+1))
observation = env.reset()
done = False
step = 1
episode_reward = 0
frames = []
agent.epsilon = 0
while not done:
action = int(agent.select_action(observation))
next_observation, reward, done, _ = env.step(action)
step += 1
total_step += 1
episode_reward += reward
observation = next_observation
if done:
all_episode_reward.append(episode_reward)
# 게임화면 보여주기
if e % 1 == 0:
frames.append(env.render(mode = 'rgb_array'))
env.close()
print("step", step)
print("reward", episode_reward)
if len(frames) > 0:
display_frames_as_gif(frames)
```
| github_jupyter |
```
from influxdb import InfluxDBClient
import pandas as pd
import matplotlib.pylab as plt
%matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 6
plt.style.use('ggplot')
from pandas import read_csv
folderName = 's1t1rc1nc1t2mediumcomputeprimeappt2micro1r3'
host = '10.155.208.132'
port = 8086
username= 'root'
password = 'root'
client = InfluxDBClient(host, port,username , password, folderName+'_k8s')
clientK6 = InfluxDBClient(host, port, username, password, folderName+'_TestK6')
appnames = ["primeapp"]
def getAllNodeNames():
    """Return the 'value' column (every node name) from the uptime measurement."""
    points = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;").get_points()
    frame = pd.DataFrame(list(points))
    return frame[:]["value"]
def getNamespaceNames(node):
    """Return all namespace names observed on the given node."""
    influxql = "SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';"
    frame = pd.DataFrame(list(client.query(influxql).get_points()))
    return frame[:]["value"]
def getAllPodNames(node,ns_name):
    """Return pod names for (node, namespace); an empty DataFrame when none exist."""
    influxql = "SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';"
    frame = pd.DataFrame(list(client.query(influxql).get_points()))
    # An empty result has no 'value' column, so hand the empty frame back as-is.
    if frame.empty:
        return frame
    return frame[:]["value"]
def getCPUUtilizationNode(node):
    """Return node-level CPU utilization samples for the given node."""
    result = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
    return pd.DataFrame(result['cpu/node_utilization'])
def getCPUUtilizationPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(node,ns_name, pod_name):
cpuUtilization = getCPUUtilizationNode(node)
podCpuUtilization = getCPUUtilizationPod(node,ns_name, pod_name)
containercpuUtilization = getCPUUtilizationPodContainer(node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(node,ns_name, pod_name):
memoryUtilization = getMemoryUtilizationNode(node)
podMemoryUtilization = getMemoryUtilizationPod(node,ns_name, pod_name)
containerMemoryUtilization = getMemoryUtilizationPodContainer(node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(node,ns_name, pod_name):
podNetworTxRate = getNetworkTxRatePod(node,ns_name, pod_name)
podNetworTx = getNetworkTxPod(node,ns_name, pod_name)
podNetworkError = getNetworkTxErrorsPod(node,ns_name, pod_name)
podNetworkErrorRate = getNetworkTxErrorsRatePod(node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(node,ns_name, pod_name):
podNetworRxRate = getNetworkRxRatePod(node,ns_name, pod_name)
podNetworRx = getNetworkRxPod(node,ns_name, pod_name)
podNetworkError = getNetworkRxErrorsPod(node,ns_name, pod_name)
podNetworkErrorRate = getNetworkRxErrorsRatePod(node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getRelevantNodeName(ns_name):
    """Return the first node hosting any pod in *ns_name* (implicitly None if no match)."""
    for candidate in getAllNodeNames():
        if not getAllPodNames(candidate, ns_name).empty:
            return candidate
def getNodeResourceUtilizationDf(nodeName):
    """Collect node-level utilisation and capacity into one time-indexed DataFrame.

    Columns: node_cpu_util, node_mem_util, node_cores (CPU capacity),
    node_mem (memory capacity).
    """
    def _frame(result, column):
        # Normalise an InfluxDB result into a single-column, time-indexed frame.
        points = pd.DataFrame(result.get_points())
        points['time'] = pd.to_datetime(points['time'])
        points = points.set_index('time')
        points.columns = [column]
        return points

    cpu_points = _frame(
        client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' "),
        'node_cpu_util')
    mem_points = _frame(
        client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' "),
        'node_mem_util')
    cores_points = _frame(
        client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+"' AND type = 'node' GROUP BY time(1m)"),
        'node_cores')
    mem_node_points = _frame(
        client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+nodeName+"' AND type = 'node' GROUP BY time(1m)"),
        'node_mem')
    return pd.concat([cpu_points, mem_points, cores_points, mem_node_points], axis=1)
def getPodResourceUtilizationDf(node, ns_name, pod_name):
    """Collect per-pod CPU/memory usage, limits, and requests into one DataFrame.

    Columns: pod_cpu_usage, pod_mem_usage, pod_cpu_limit, pod_mem_limit,
    pod_cpu_request, pod_mem_request, indexed by timestamp.
    """
    def _frame(result, column):
        # Normalise an InfluxDB result into a single-column, time-indexed frame.
        points = pd.DataFrame(result.get_points())
        points['time'] = pd.to_datetime(points['time'])
        points = points.set_index('time')
        points.columns = [column]
        return points

    # Shared WHERE clause for all six pod-scoped queries.
    where = ('nodename = \'' + node + '\' AND pod_name = \'' + pod_name +
             '\' AND namespace_name = \'' + ns_name + '\' AND type=\'pod\'')
    frames = [
        _frame(client.query('SELECT value FROM "cpu/usage_rate" where ' + where + ';'), 'pod_cpu_usage'),
        _frame(client.query('SELECT value from "memory/usage" where ' + where + ';'), 'pod_mem_usage'),
        _frame(client.query('SELECT mean("value") FROM "cpu/limit" where ' + where + ' group by time(1m);'), 'pod_cpu_limit'),
        _frame(client.query('SELECT mean("value") from "memory/limit" where ' + where + ' group by time(1m);'), 'pod_mem_limit'),
        _frame(client.query('SELECT mean("value") FROM "cpu/request" where ' + where + ' group by time(1m);'), 'pod_cpu_request'),
        _frame(client.query('SELECT mean("value") from "memory/request" where ' + where + ' group by time(1m);'), 'pod_mem_request'),
    ]
    return pd.concat(frames, axis=1)
def getRequestsDf():
    """Assemble k6 load-test metrics (1-minute buckets) into one DataFrame.

    Columns: vus, requests, requests_duration_percentile_95/_90,
    requests_duration_max/min/mean/median, indexed by timestamp.
    """
    def _series(query, measurement, column):
        # NOTE(review): relies on the result columns arriving as (value, time),
        # matching the original cell-by-cell implementation.
        frame = pd.DataFrame(clientK6.query(query)[measurement])
        frame.columns = [column, 'time']
        return frame.set_index('time')

    # (query, measurement name, output column) for every metric we merge.
    specs = [
        ('SELECT sum("value") FROM "vus" group by time(1m);', 'vus', 'vus'),
        ('SELECT sum("value") FROM "http_reqs" group by time(1m);', 'http_reqs', 'requests'),
        ('SELECT percentile("value", 95) FROM "http_req_duration" group by time(1m) ;', 'http_req_duration', 'requests_duration_percentile_95'),
        ('SELECT percentile("value", 90) FROM "http_req_duration" group by time(1m) ;', 'http_req_duration', 'requests_duration_percentile_90'),
        ('SELECT max("value") FROM "http_req_duration" group by time(1m);', 'http_req_duration', 'requests_duration_max'),
        ('SELECT min("value") FROM "http_req_duration" group by time(1m);', 'http_req_duration', 'requests_duration_min'),
        ('SELECT mean("value") FROM "http_req_duration" group by time(1m);', 'http_req_duration', 'requests_duration_mean'),
        ('SELECT median("value") FROM "http_req_duration" group by time(1m);', 'http_req_duration', 'requests_duration_median'),
    ]
    finalDF = None
    for query, measurement, column in specs:
        frame = _series(query, measurement, column)
        # Inner-join on timestamp, exactly like the original chain of merges.
        finalDF = frame if finalDF is None else pd.merge(finalDF, frame, left_index=True, right_index=True)
    finalDF.index = pd.to_datetime(finalDF.index)
    return finalDF
def getPodsNodesRequestsDf():
    """Build one merged node+pod+load-test DataFrame per pod in the default namespace.

    Returns a list of DataFrames, one per pod, each inner-joined on timestamp
    with the node metrics and the k6 request metrics.
    """
    default_ns_name = "default"
    relevantNodeName = getRelevantNodeName(default_ns_name)
    podNames = getAllPodNames(relevantNodeName, default_ns_name)
    df_node = getNodeResourceUtilizationDf(relevantNodeName)
    # The k6 metrics do not depend on the pod, so fetch them once (the
    # original re-queried them on every loop iteration).
    requestsDF = getRequestsDf()
    df_pods_node = []
    for podName in podNames:
        df_pod = getPodResourceUtilizationDf(relevantNodeName, default_ns_name, podName)
        finalDF = pd.merge(df_node, df_pod, left_index=True, right_index=True)
        finalDF = pd.merge(finalDF, requestsDF, left_index=True, right_index=True)
        # Convert capacities to human units: millicores -> cores, bytes -> GiB.
        # (Six no-op self-assignments of the pod_* columns were removed.)
        finalDF['node_cores'] = finalDF['node_cores'] / 1000
        finalDF['node_mem'] = finalDF['node_mem'] / (1073741824)
        finalDF = finalDF.fillna(0)
        # Drop rows that are entirely zero.
        finalDF = finalDF[(finalDF.T != 0).any()]
        df_pods_node.append(finalDF)
    return df_pods_node
# Build the per-pod datasets and plot requests-per-minute for the first pod.
df_pods_node = getPodsNodesRequestsDf()
df_pods_node[0].head()
df_pods_node[0] = df_pods_node[0].reset_index(drop=True)
plt.plot(df_pods_node[0]['requests'])
plt.title("RPM")
plt.ylabel("RPM")
plt.grid(True)
plt.show()
```
# Node Utilization (CPU and memory)
```
plt.figure()
# Per-pod CPU usage over the test run.
plt.plot(df_pods_node[0]['pod_cpu_usage'], label='pod_cpu_usage')
#plt.plot(df_pods_node[0]['pod_mem_usage'], label='pod_mem_usage')
plt.legend()
plt.show()
# NOTE(review): fillna() is not assigned back, so this call only displays the
# filled frame in the notebook without modifying df_pods_node[0].
df_pods_node[0].fillna(0)
df_pods_node[0].corr()
# Feature subsets used for the CPU- and memory-oriented analyses below.
dftemp_cpu = df_pods_node[0][['requests','node_cores','node_mem', 'node_cpu_util', 'pod_cpu_usage','pod_cpu_limit','pod_cpu_request','pod_mem_limit','pod_mem_request', 'requests_duration_mean', 'requests_duration_percentile_95']]
dftemp_mem = df_pods_node[0][['requests', 'node_cores','node_mem', 'node_mem_util','pod_cpu_limit','pod_cpu_request','pod_mem_usage','pod_mem_limit','pod_mem_request', 'requests_duration_mean', 'requests_duration_percentile_95']]
plt.plot( dftemp_cpu['node_cpu_util'], color='blue', linewidth=2)
plt.plot( dftemp_cpu['pod_cpu_usage'], color='red', linewidth=2)
plt.plot( dftemp_cpu['requests'], color='green', linewidth=2)
plt.plot(dftemp_cpu['requests_duration_percentile_95'], color='blue', linewidth=2)
import seaborn as sb
```
# Linear Regression
```
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
# Fit polynomials of increasing degree to requests vs. node CPU utilisation.
# Use only one feature
df_X = dftemp_cpu[['requests']].values
df_Y = dftemp_cpu[['node_cpu_util']].values
# Explicit numpy import instead of `from numpy import *` (and the unused
# `from scipy.interpolate import *`), which shadow builtins like sum/all.
import numpy as np
df_X = df_X.flatten()
df_Y = df_Y.flatten()
p1 = np.polyfit(df_X, df_Y, 1)
p2 = np.polyfit(df_X, df_Y, 2)
p3 = np.polyfit(df_X, df_Y, 3)
plt.plot(df_X, df_Y, 'o')
#plt.plot(df_X, np.polyval(p1, df_X), 'b-')
#plt.plot(df_X, np.polyval(p2, df_X), 'g-')
plt.plot(df_X, np.polyval(p3, df_X), 'y-')
p3
# ---- Multivariate linear regression: node CPU utilisation from load ----
# Features: request rate and node core capacity; target: node CPU utilisation.
df_X = dftemp_cpu[['requests', 'node_cores']].values
df_Y = dftemp_cpu[['node_cpu_util']].values
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
# Plain ordinary-least-squares model (regularised/polynomial variants are
# left for experimentation).
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
y_pred = ols.predict(X_test)
# Report the fitted parameters and held-out error.
print('Coefficients: \n', ols.coef_)
print('intercept: \n', ols.intercept_)
print("Mean squared error: %.2f"
      % mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))
# Scatter the held-out data against the fitted line (first feature only).
plt.scatter(X_test[:, 0], y_test, color='black')
plt.plot(X_test[:, 0], y_pred, '-r')
plt.show()
# Example prediction: 4500 requests/min on a 1-core node.
ols.predict([[4500, 1]])
```
# dataset_pod_hello_world
```
# Align the per-pod hello-world dataset with the merged metrics by nearest
# timestamp, persist the join, then reload and plot mean RPS vs CPU.
dataset_pod_hello_world.index = pd.to_datetime(dataset_pod_hello_world.index)
merged.index = pd.to_datetime(merged.index)
newmergedhello = dataset_pod_hello_world.reindex(merged.index, method='nearest')
finalDFhello = pd.merge(newmergedhello, merged, left_index=True, right_index=True)
finalDFhello.to_csv('final_hello.csv')
dfhello = pd.read_csv('final_hello.csv', index_col=0)  # was bare read_csv, undefined here
dfhello = dfhello.fillna(0)
dfhello = dfhello.sort_values(by=['aggregate.rps.mean'])
dfhello = dfhello.reset_index()
dfhello = dfhello[['aggregate.rps.mean', 'cpu', 'aggregate.scenarioDuration.median']]
plt.plot(dfhello['aggregate.rps.mean'], dfhello['cpu'], color='blue', linewidth=3)
def linear(dft):
    """Fit and evaluate a linear model of pod CPU usage against mean RPS.

    Prints coefficients, intercept, MSE, R^2, and train/test scores, then
    plots the held-out points against the fitted line.
    """
    # Use only one feature
    df_X = dft[['aggregate.rps.mean']].values
    df_Y = dft[['cpu']].values
    X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
    # `normalize=` was deprecated and then removed from LinearRegression;
    # for plain OLS the fitted predictions are identical without it.
    regr = linear_model.LinearRegression()
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    print('Coefficients: \n', regr.coef_)
    print('intercept: \n', regr.intercept_)
    print("Mean squared error: %.2f"
          % mean_squared_error(y_test, y_pred))
    # Explained variance score: 1 is perfect prediction
    print('Variance score: %.2f' % r2_score(y_test, y_pred))
    print ('Train score %.2f', regr.score(X_train, y_train) )
    print ('Test score %.2f', regr.score(X_test, y_test) )
    # NOTE(review): the old "Pred score" line scored the predictions against
    # themselves, which is always 1.0, so it carried no information and was
    # dropped.
    # Plot outputs
    plt.scatter(X_test, y_test, color='black')
    plt.plot(X_test, y_pred, color='blue')
    plt.show()
linear(dfhello)
# Repeat the alignment + regression for the pdescp service.
dataset_pod_pdescp.index = pd.to_datetime(dataset_pod_pdescp.index)
merged.index = pd.to_datetime(merged.index)
newmergedpdescp = dataset_pod_pdescp.reindex(merged.index, method='nearest')
finalDFpdescp = pd.merge(newmergedpdescp, merged, left_index=True, right_index=True)
finalDFpdescp.to_csv('final_pdescp.csv')
dfpdescp = pd.read_csv('final_pdescp.csv', index_col=0)  # was bare read_csv, undefined here
dfpdescp = dfpdescp.fillna(0)
dfpdescp = dfpdescp.sort_values(by=['aggregate.rps.mean'])
dfpdescp = dfpdescp.reset_index()
dfpdescp = dfpdescp[['aggregate.rps.mean', 'cpu']]
plt.plot(dfpdescp['aggregate.rps.mean'], dfpdescp['cpu'], color='blue', linewidth=3)
linear(dfpdescp)
# And for the server pod.
dataset_pod_server.index = pd.to_datetime(dataset_pod_server.index)
merged.index = pd.to_datetime(merged.index)
newmergedserver = dataset_pod_server.reindex(merged.index, method='nearest')
finalDFserver = pd.merge(newmergedserver, merged, left_index=True, right_index=True)
finalDFserver.to_csv('final_server.csv')
dfpserver = pd.read_csv('final_server.csv', index_col=0)  # was bare read_csv, undefined here
dfpserver = dfpserver.fillna(0)
dfpserver = dfpserver.sort_values(by=['aggregate.rps.mean'])
dfpserver = dfpserver.reset_index()
dfpserver = dfpserver[['aggregate.rps.mean', 'cpu']]
plt.plot(dfpserver['aggregate.rps.mean'], dfpserver['cpu'], color='blue', linewidth=3)
linear(dfpserver)
```
| github_jupyter |
# 1. Data Acquisition
We will reference the publicly available [Reddit dump](https://www.reddit.com/r/datasets/comments/3bxlg7/i_have_every_publicly_available_reddit_comment/). The dataset is publicly available on Google BigQuery and is divided across months from December 2015 - October 2018. BigQuery allows us to perform low latency queries on massive datasets. One example is [this](https://bigquery.cloud.google.com/table/fh-bigquery:reddit_posts.2018_08). Unfortunately the posts have not been tagged with their comments. To extract this information, in addition to BigQuery, we will use [PRAW](https://praw.readthedocs.io/en/latest/) for this task.
The idea is to randomly query a subset of posts from December 2015 - October 2018. Then for each of the post, use praw to get comments for each one.
We are considering 11 flairs:
```
1. AskIndia
2. Politics
3. Sports
4. Food
5. [R]eddiquette
6. Non-Political
7. Scheduled
8. Business/Finance
9. Science/Technology
10. Photography
11. Policy/Economy
```
### Note: This notebook requires a GCP Acount, a Reddit account, CLOUD SDK installed.
Go to [cloud sdk for more info](https://cloud.google.com/sdk/)
Follow the following steps before running this notebook:
```
1. Install BigQuery locally using: pip install --upgrade google-cloud-bigquery
2. In the GCP Console, go to the Create service account key page.
3. From the Service account drop-down list, select New service account.
4. In the Service account name field, enter a name .
5. From the Role drop-down list, select Project > Owner.
6. Click Create. A JSON file that contains your key downloads to your computer.
7. In a new session, execute the following command-
export GOOGLE_APPLICATION_CREDENTIALS="/home/user/Downloads/[FILE_NAME].json"
```
### To setup reddit credentials
```
1. Go to https://www.reddit.com/prefs/apps
2. Click create app at the bottom
3. Enter an app name, choose 'script' and enter http://localhost:8080 in redirect uri
4. Save the client id, client secret
```
### Run mongo client in a different session
```
1. In a separate session (terminal), run ./mongod before proceeding.
```
##### Importing all libraries
Here we will be using
1. PyMongo - a python wrapper for MongoDB to build our train and test datasets
2. PRAW - a python wrapper for reddit API
3. Numpy
4. Pandas
```
import pymongo
from pymongo import MongoClient
import numpy as np
import praw
import pandas as pd
from google.cloud import bigquery
```
Initializing the praw.reddit(), biguery.Client() and MongoClient() objects
```
# Enter your credentials here
client_id = ''
client_secret = ''
user_agent = ''
username = ''
password = ''
# Parenthesised call instead of backslash line continuations.
reddit = praw.Reddit(
    client_id=client_id,
    client_secret=client_secret,
    user_agent=user_agent,
    username=username,
    password=password,
)
client = bigquery.Client()
mongo_client = MongoClient()
```
#### Here we are querying the dataset from 2015-2018 limiting results to 100000 records and save to a dataframe
```
# Sample r/india posts (2015-2018) carrying one of the 11 flairs of interest.
QUERY_POSTS = (
    'SELECT * except (domain, subreddit, author_flair_css_class, link_flair_css_class, author_flair_text,'
    'from_kind, saved, hide_score, archived, from_id, name, quarantine, distinguished, stickied,'
    'thumbnail, is_self, retrieved_on, gilded, subreddit_id) '
    # Trailing space added: the original concatenation produced `201*`WHERE
    # with no separator between the table name and the WHERE keyword.
    'FROM `fh-bigquery.reddit_posts.201*` '
    'WHERE subreddit = "india" and link_flair_text in ("Sports", "Politics", "AskIndia", "Business/Finance", "Food",'
    '"Science/Technology", "Non-Political", "Photography", "Policy/Economy", "Scheduled", "[R]eddiquette") '
    'LIMIT 100000'
)
query_job = client.query(QUERY_POSTS)
query = query_job.result().to_dataframe()
```
### Buidling our train and test sets
To build a balanced dataset, we will limit the number of samples for each flair at 2000 and randomly sample from the extracted dataset.
```
# Cap each flair at 2000 posts and randomly sample which post ids to keep,
# so the final dataset is roughly class-balanced.
keep = []
data = query
# NOTE(review): `flairs` is not defined in this cell; presumably it holds the
# unique link_flair_text values from an earlier cell -- confirm upstream.
flairs = [flair for flair in flairs if not str(flair) == 'nan']
for flair in flairs:
    l = len(data[data['link_flair_text'] == flair])
    if l > 2000:
        l = 2000
    idx = list(data[data['link_flair_text'] == flair]['id'])
    # Sample post ids without replacement so none is duplicated.
    c = np.random.choice(idx, l, replace=False)
    for i in c:
        keep.append(i)
print (len(keep))
```
We keep only these samples and discard others.
```
# Keep only the sampled post ids; discard everything else.
data = data[data['id'].isin(keep)]
```
### Saving the dataset to a mongoDB collection
Here we define a mongodb database - "dataset" and dump the dataframe to collection "reddit_data". Before doing this, we use praw to get comments for each dataset and add this feature as comments to our dataset. For each post, we limit to top 10 comments.
```
# Fetch up to 10 top-level comments per post via PRAW and store each post
# (plus its comments) as a document in the reddit_data collection.
mongo_client = MongoClient('mongodb://localhost:27017/')
db = mongo_client.dataset
collection = db['reddit_data']
import time
start = time.time()
np.random.seed(42)
for i, row in data.iterrows():
    comments = []
    num_comm = 10
    submission = reddit.submission(id=row['id'])
    l = len(submission.comments)
    if l > 0:
        if l < 10:
            num_comm = l
        # Sample comment indices without replacement.
        r = np.random.choice(l, num_comm, replace=False)
        for j in r:  # was `i`, which shadowed the outer row-index variable
            comments.append(submission.comments[j].body)
    t = {'created_utc': row['created_utc'],
         'title': row['title'],
         'selftext': row['selftext'],
         'author': row['author'],
         'num_comments': row['num_comments'],
         'id': row['id'],
         'link_flair_text': row['link_flair_text'],
         'comments': comments,
         'url': row['url'],
         'score': row['score'],
         'over_18': row['over_18']}
    # Collection.insert() was deprecated and removed from PyMongo; use insert_one.
    collection.insert_one(t)
print ((time.time()-start)/60)
```
We can now export this dataset as json file
```
!mongoexport --db dataset -c reddit_dataset --out ./reddit_data.json
```
### Note: We manually split the dataset from this collection to training and test sets by a 80/20 split into train.json and test.json available in the github repo
| github_jupyter |
```
import os, platform, pprint, sys
import fastai
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import yellowbrick as yb
from fastai.tabular.data import TabularDataLoaders, TabularPandas
from fastai.tabular.all import FillMissing, Categorify, Normalize, tabular_learner, accuracy, ClassificationInterpretation, ShowGraphCallback, RandomSplitter, range_of
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit, cross_val_score
from lightgbm import LGBMClassifier
import time
from yellowbrick.model_selection import CVScores, LearningCurve, ValidationCurve
seed: int = 14
# set up pretty printer for easier data evaluation
pretty = pprint.PrettyPrinter(indent=4, width=30).pprint
# declare file paths for the data we will be working on
file_path_1: str = '../../data/prepared/baseline/Benign_vs_DDoS.csv'
file_path_2: str = '../../data/prepared/timebased/Benign_vs_DDoS.csv'
dataPath : str = './models'
# enumerate dataset types
Baseline : int = 0
Timebased: int = 1
# print library and python versions for reproducibility
print(
f'''
python:\t{platform.python_version()}
\tfastai:\t\t{fastai.__version__}
\tmatplotlib:\t{mpl.__version__}
\tnumpy:\t\t{np.__version__}
\tpandas:\t\t{pd.__version__}
\tsklearn:\t{sklearn.__version__}
\tyellowbrick:\t{yb.__version__}
'''
)
def load_data(filePath: str) -> pd.DataFrame:
    '''
    Loads the Dataset from the given filepath and caches it as a pickle for
    quick access in the future.
    Function will only work when filepath is a .csv file
    '''
    # Map the data path to a cache path. NOTE(review): the magic slice [17:]
    # drops the first 17 characters of relative paths ('../../data/prepar'
    # for the paths used in this notebook) -- confirm the intended prefix
    # before changing it, since existing cache file names depend on it.
    if filePath.startswith('..'):
        filePathClean: str = filePath[17:]
        pickleDump: str = f'../../data/cache/{filePathClean}.pickle'
    else:
        pickleDump: str = f'../../data/cache/{filePath}.pickle'
    print(f'Loading Dataset: {filePath}')
    print(f'\tTo Dataset Cache: {pickleDump}\n')
    # Use the cached pickle when available; otherwise parse the CSV once and cache it.
    if os.path.exists(pickleDump):
        df = pd.read_pickle(pickleDump)
    else:
        df = pd.read_csv(filePath, low_memory=True)
        df.to_pickle(pickleDump)
    return df
# Load (and cache) both variants of the Benign-vs-DDoS dataset.
baseline_df : pd.DataFrame = load_data(file_path_1)
timebased_df: pd.DataFrame = load_data(file_path_2)
def run_linear_discriminant_experiment(df, name) -> tuple:
    '''
    Run binary classification. NOTE(review): despite the function name and the
    original comments, the classifier trained here is LightGBM
    (LGBMClassifier), not a linear discriminant or K-Nearest Neighbors.
    returns the 7-tuple with the following indices:
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    # First we split the features into the dependent variable and
    # continuous and categorical features
    dep_var: str = 'Label'
    if 'Protocol' in df.columns:
        categorical_features: list = ['Protocol']
    else:
        categorical_features: list = []
    continuous_features = list(set(df) - set(categorical_features) - set([dep_var]))
    # Next, we set up the feature engineering pipeline, namely filling missing values
    # encoding categorical features, and normalizing the continuous features
    # all within a pipeline to prevent the normalization from leaking details
    # about the test sets through the normalized mapping of the training sets
    procs = [FillMissing, Categorify, Normalize]
    splits = RandomSplitter(valid_pct=0.2, seed=seed)(range_of(df))
    # The dataframe is loaded into a fastai datastructure now that
    # the feature engineering pipeline has been set up
    to = TabularPandas(
        df , y_names=dep_var ,
        splits=splits , cat_names=categorical_features ,
        procs=procs , cont_names=continuous_features ,
    )
    # We use fastai to quickly extract the names of the classes as they are mapped to the encodings
    dls = to.dataloaders(bs=64)
    mds = tabular_learner(dls)
    classes: list = list(mds.dls.vocab)
    # We extract the training and test datasets from the dataframe
    X_train = to.train.xs.reset_index(drop=True)
    X_test = to.valid.xs.reset_index(drop=True)
    y_train = to.train.ys.values.ravel()
    y_test = to.valid.ys.values.ravel()
    # Now that we have the train and test datasets, we train a LightGBM
    # classifier and print the results
    model = LGBMClassifier()
    t0 = time.time()
    model.fit(X_train, y_train)
    train_time = time.time()-t0
    prediction = model.predict(X_test)
    print(f'\tAccuracy: {accuracy_score(y_test, prediction)}\n Time: {train_time} seconds')
    report = classification_report(y_test, prediction)
    print(report)
    # we add a target_type_ attribute to our model so yellowbrick knows how to make the visualizations
    if len(classes) == 2:
        model.target_type_ = 'binary'
    elif len(classes) > 2:
        model.target_type_ = 'multiclass'
    else:
        print('Must be more than one class to perform classification')
        raise ValueError('Wrong number of classes')
    # Now that the classifier has been created and trained, we pass out our training values
    # so that yellowbrick can use them to create various visualizations
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    return viz_data
def visualize_learning_curve_train(results: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a learning curve
    results: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    # Unpack the model and the *training* split; both the training and
    # cross-validation scores should approach 1 as more data is used.
    _, model, _, X_train, y_train, _, _ = results
    curve = LearningCurve(model, scoring='f1_weighted')
    curve.fit(X_train, y_train)
    curve.show()
def visualize_learning_curve_test(results: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a learning curve
    results: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    # Unpack the model and the *held-out* split; both the training and
    # cross-validation scores should approach 1 as more data is used.
    _, model, _, _, _, X_test, y_test = results
    curve = LearningCurve(model, scoring='f1_weighted')
    curve.fit(X_test, y_test)
    curve.show()
def visualize_confusion_matrix(viz_data: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a confusion matrix
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    name, model, classes, _, _, X_test, y_test = viz_data
    matrix = yb.classifier.ConfusionMatrix(model, classes=classes, title=name)
    matrix.score(X_test, y_test)
    matrix.show()
def visualize_roc(viz_data: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a
    Receiver Operating Characteristic (ROC) Curve
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    visualizer = yb.classifier.ROCAUC(viz_data[1], classes=viz_data[2], title=viz_data[0])
    visualizer.score(viz_data[5], viz_data[6])
    # poof() is deprecated in yellowbrick 1.x; show() is the supported call.
    visualizer.show()
def visualize_pr_curve(viz_data: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a
    Precision-Recall Curve
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    visualizer = yb.classifier.PrecisionRecallCurve(viz_data[1], title=viz_data[0])
    visualizer.score(viz_data[5], viz_data[6])
    # poof() is deprecated in yellowbrick 1.x; show() is the supported call.
    visualizer.show()
def visualize_report(viz_data: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a report
    detailing the Precision, Recall, f1, and Support scores for all
    classification outcomes
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    visualizer = yb.classifier.ClassificationReport(viz_data[1], classes=viz_data[2], title=viz_data[0], support=True)
    visualizer.score(viz_data[5], viz_data[6])
    # poof() is deprecated in yellowbrick 1.x; show() is the supported call.
    visualizer.show()
def visualize_class_balance(viz_data: tuple) -> None:
    '''
    Takes a 7-tuple from the run_experiments function and creates a histogram
    detailing the balance between classification outcomes
    viz_data: tuple = (name, model, classes, X_train, y_train, X_test, y_test)
    '''
    # BUG FIX: labels must be the list of class names (viz_data[2]); the
    # experiment-name string (viz_data[0]) was being passed before.
    visualizer = yb.target.ClassBalance(labels=viz_data[2])
    # Compare class frequencies of the train targets against the test targets.
    visualizer.fit(viz_data[4], viz_data[6])
    visualizer.show()
# Baseline dataset: train the classifier, then render every visualisation.
viz_data_baseline_nb = run_linear_discriminant_experiment(baseline_df, 'ddos_vs_benign_baseline')
visualize_learning_curve_test(viz_data_baseline_nb)
visualize_report(viz_data_baseline_nb)
visualize_confusion_matrix(viz_data_baseline_nb)
visualize_roc(viz_data_baseline_nb)
visualize_pr_curve(viz_data_baseline_nb)
# Time-based dataset: same pipeline.
viz_data_timebased_nb = run_linear_discriminant_experiment(timebased_df, 'ddos_vs_benign_timebased')
visualize_learning_curve_test(viz_data_timebased_nb)
visualize_report(viz_data_timebased_nb)
visualize_confusion_matrix(viz_data_timebased_nb)
visualize_roc(viz_data_timebased_nb)
visualize_pr_curve(viz_data_timebased_nb)
```
| github_jupyter |
# Assignment 3: Hello Vectors
Welcome to this week's programming assignment of the specialization. In this assignment we will explore word vectors. In natural language processing, we represent each word as a vector consisting of numbers. The vector encodes the meaning of the word. These numbers (or weights) for each word are learned using various machine learning models, which we will explore in more detail later in this specialization. Rather than make you code the machine learning models from scratch, we will show you how to use them. In the real world, you can always load the trained word vectors, and you will almost never have to train them from scratch. In this assignment you will
- Predict analogies between words.
- Use PCA to reduce the dimensionality of the word embeddings and plot them in two dimensions.
- Compare word embeddings by using a similarity measure (the cosine similarity).
- Understand how these vector space models work.
## 1.0 Predict the World Capitals
In this part of the assignment, you are asked to predict the capital cities of some countries. You are playing trivia against some second grader who just took their geography test and knows all the capitals by heart. Thanks to NLP, you will be able to answer the questions properly. In other words, you will write a program that can give you the capital of each country. That way you are pretty sure you will win the trivia game. We will start by exploring the data set.
<img src = 'map.jpg' width="width" height="height" style="width:467px;height:300px;"/>
### 1.1 Importing the data
As usual, you start by importing some essential Python libraries and loading the dataset. The dataset will be loaded as a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html), which is a very common method in data science. Because of the large size of the data, this may take a few minutes.
```
# Run this cell to import packages.
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import get_vectors
# capitals.txt is space-delimited: city1 country1 city2 country2 per row.
data = pd.read_csv('capitals.txt', delimiter = ' ')
data.columns = ['city1','country1','city2', 'country2']
# print first five elements in the DataFrame
data.head(5)
```
***
### To Run This Code On Your Own Machine:
Note that because the original google news word embedding dataset is about 3.64 gigabytes, the workspace is not able to handle the full file set. So we've downloaded the full dataset, extracted a sample of the words that we're going to analyze in this assignment, and saved it in a pickle file called word_embeddings_capitals.p
If you want to download the full dataset on your own and choose your own set of word embeddings, please see the instructions and some helper code.
- Download the dataset from this [page](https://code.google.com/archive/p/word2vec/).
- Search in the page for 'GoogleNews-vectors-negative300.bin.gz' and click the link to download.
Copy-paste the code below and run it on your local machine after downloading the dataset to the same directory as the notebook.
```python
import nltk
import pickle  # BUG FIX: needed for pickle.dump below, was not imported
from gensim.models import KeyedVectors  # BUG FIX: KeyedVectors was used without being imported

# Load the full 3.64 GB GoogleNews word2vec model (binary format).
embeddings = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)

# Collect every token that appears in capitals.txt, plus a few extra words
# used later for the PCA plot, so we only keep embeddings we actually need.
f = open('capitals.txt', 'r').read()
set_words = set(nltk.word_tokenize(f))
select_words = ['king', 'queen', 'oil', 'gas', 'happy', 'sad', 'city', 'town', 'village', 'country', 'continent', 'petroleum', 'joyful']
for w in select_words:
    set_words.add(w)

def get_word_embeddings(embeddings):
    """Return a dict {word: vector} restricted to the words in set_words."""
    word_embeddings = {}
    for word in embeddings.vocab:
        if word in set_words:
            word_embeddings[word] = embeddings[word]
    return word_embeddings

# Testing your function
word_embeddings = get_word_embeddings(embeddings)
print(len(word_embeddings))
# Persist the small subset so the assignment can run without the 3.64 GB file.
pickle.dump(word_embeddings, open("word_embeddings_subset.p", "wb"))
```
***
Now we will load the word embeddings as a [Python dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries). As stated, these have already been obtained through a machine learning algorithm.
```
# Load the pre-extracted subset of the GoogleNews embeddings
# (a dict mapping each word to a 300-dimensional numpy vector).
word_embeddings = pickle.load( open( "word_embeddings_subset.p", "rb" ) )
len(word_embeddings) #there should be 243 words that will be used in this assignment
```
Each of the word embedding is a 300-dimensional vector.
```
# Every vector in the subset has 300 components.
print("dimension: {}".format(word_embeddings['Spain'].shape[0]))
```
### Predict relationships among words
Now you will write a function that will use the word embeddings to predict relationships among words.
* The function will take as input three words.
* The first two are related to each other.
* It will predict a 4th word which is related to the third word in a similar manner as the two first words are related to each other.
* As an example, "Athens is to Greece as Bangkok is to ______"?
* You will write a program that is capable of finding the fourth word.
* We will give you a hint to show you how to compute this.
A similar analogy would be the following:
<img src = 'vectors.jpg' width="width" height="height" style="width:467px;height:200px;"/>
You will implement a function that can tell you the capital of a country. You should use the same methodology shown in the figure above. To do this, you'll first compute the cosine similarity metric or the Euclidean distance.
### 1.2 Cosine Similarity
The cosine similarity function is:
$$\cos (\theta)=\frac{\mathbf{A} \cdot \mathbf{B}}{\|\mathbf{A}\|\|\mathbf{B}\|}=\frac{\sum_{i=1}^{n} A_{i} B_{i}}{\sqrt{\sum_{i=1}^{n} A_{i}^{2}} \sqrt{\sum_{i=1}^{n} B_{i}^{2}}}\tag{1}$$
$A$ and $B$ represent the word vectors and $A_i$ or $B_i$ represent index i of that vector.
* Note that if A and B are identical, you will get $cos(\theta) = 1$.
* Otherwise, if they are the total opposite, meaning, $A= -B$, then you would get $cos(\theta) = -1$.
* If you get $cos(\theta) =0$, that means that they are orthogonal (or perpendicular).
* Numbers between 0 and 1 indicate a similarity score.
* Numbers between -1 and 0 indicate a dissimilarity score.
**Instructions**: Implement a function that takes in two word vectors and computes the cosine distance.
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li> Python's<a href="https://docs.scipy.org/doc/numpy/reference/" > NumPy library </a> adds support for linear algebra operations (e.g., dot product, vector norm ...).</li>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html" > numpy.dot </a>.</li>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html">numpy.linalg.norm </a>.</li>
</ul>
</p>
```
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def cosine_similarity(A, B):
    """Return the cosine similarity between word vectors A and B.

    Args:
        A: a numpy array holding one word vector.
        B: a numpy array holding another word vector.

    Returns:
        A scalar in [-1, 1]: 1 for identical direction, -1 for opposite
        direction, 0 for orthogonal vectors.
    """
    # cos(theta) = (A . B) / (||A|| * ||B||)
    numerator = np.dot(A, B)
    denominator = np.linalg.norm(A) * np.linalg.norm(B)
    return numerator / denominator
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# feel free to try different words
king = word_embeddings['king']
queen = word_embeddings['queen']
# Related words point in similar directions: expect roughly 0.65.
cosine_similarity(king, queen)
```
**Expected Output**:
$\approx$ 0.6510956
### 1.3 Euclidean distance
You will now implement a function that computes the similarity between two vectors using the Euclidean distance.
Euclidean distance is defined as:
$$ \begin{aligned} d(\mathbf{A}, \mathbf{B})=d(\mathbf{A}, \mathbf{B}) &=\sqrt{\left(A_{1}-B_{1}\right)^{2}+\left(A_{2}-B_{2}\right)^{2}+\cdots+\left(A_{n}-B_{n}\right)^{2}} \\ &=\sqrt{\sum_{i=1}^{n}\left(A_{i}-B_{i}\right)^{2}} \end{aligned}$$
* $n$ is the number of elements in the vector
* $A$ and $B$ are the corresponding word vectors.
* The more similar the words, the more likely the Euclidean distance will be close to 0.
**Instructions**: Write a function that computes the Euclidean distance between two vectors.
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html" > numpy.linalg.norm </a>.</li>
</ul>
</p>
```
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def euclidean(A, B):
    """Return the Euclidean (L2) distance between word vectors A and B.

    Args:
        A: a numpy array holding one word vector.
        B: a numpy array holding another word vector.

    Returns:
        A non-negative scalar; values near 0 mean the vectors are similar.
    """
    # ||A - B|| computed directly from the difference vector
    difference = A - B
    return np.linalg.norm(difference)
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# Test your function
# Expected distance between 'king' and 'queen': ~2.48
euclidean(king, queen)
```
**Expected Output:**
2.4796925
### 1.4 Finding the country of each capital
Now, you will use the previous functions to compute similarities between vectors, and use these to find the capital cities of countries. You will write a function that takes in three words, and the embeddings dictionary. Your task is to find the capital cities. For example, given the following words:
- 1: Athens 2: Greece 3: Baghdad,
your task is to predict the country 4: Iraq.
**Instructions**:
1. To predict the capital you might want to look at the *King - Man + Woman = Queen* example above, and implement that scheme into a mathematical function, using the word embeddings and a similarity function.
2. Iterate over the embeddings dictionary and compute the cosine similarity score between your vector and the current word embedding.
3. You should add a check to make sure that the word you return is not any of the words that you fed into your function. Return the one with the highest score.
```
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_country(city1, country1, city2, embeddings):
    """Predict the country whose capital is `city2` using the vector analogy
    country1 - city1 + city2 (the King - Man + Woman = Queen scheme).

    Args:
        city1: a string (the capital city of country1)
        country1: a string (the country whose capital is city1)
        city2: a string (the capital city of the country we want to find)
        embeddings: a dictionary mapping each word to its embedding vector

    Returns:
        (country, similarity): the non-input word whose embedding is most
        cosine-similar to the analogy vector, with its similarity score.
    """
    # the three input words must never be returned as the answer
    group = set((city1, country1, city2))
    # BUG FIX: look vectors up in the `embeddings` argument rather than the
    # global `word_embeddings` the original version silently depended on.
    city1_emb = embeddings[city1]
    country1_emb = embeddings[country1]
    city2_emb = embeddings[city2]
    # analogy vector: country2 ~= country1 - city1 + city2
    vec = country1_emb - city1_emb + city2_emb
    # cosine similarity is always > -1 for real vectors, so every candidate
    # beats this starting value
    similarity = -1
    country = ''
    for word in embeddings.keys():
        if word not in group:
            word_emb = embeddings[word]
            cur_similarity = cosine_similarity(vec, word_emb)
            if cur_similarity > similarity:
                similarity = cur_similarity
                # keep (word, score) so the caller gets both pieces
                country = (word, similarity)
    return country
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
# Testing your function, note to make it more robust you can return the 5 most similar words.
# Expected: ('Egypt', 0.7626821)
get_country('Athens', 'Greece', 'Cairo', word_embeddings)
```
**Expected Output:**
('Egypt', 0.7626821)
### 1.5 Model Accuracy
Now you will test your new function on the dataset and check the accuracy of the model:
$$\text{Accuracy}=\frac{\text{Correct # of predictions}}{\text{Total # of predictions}}$$
**Instructions**: Write a program that can compute the accuracy on the dataset provided for you. You have to iterate over every row to get the corresponding words and feed them into you `get_country` function above.
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li>Use <a href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iterrows.html" > pandas.DataFrame.iterrows </a>.</li>
</ul>
</p>
```
def get_accuracy(word_embeddings, data):
    """Return the fraction of rows whose country2 is predicted correctly.

    Args:
        word_embeddings: a dictionary mapping each word to its embedding
        data: a DataFrame with columns city1, country1, city2, country2

    Returns:
        Accuracy in [0, 1]: correct predictions / total rows.
    """
    hits = 0
    for _, row in data.iterrows():
        # apply the (city1 -> country1) analogy to city2 and compare the
        # predicted country against the row's actual country2
        predicted, _ = get_country(row.city1, row.country1, row.city2, word_embeddings)
        if predicted == row.country2:
            hits += 1
    return hits / len(data)
```
**NOTE: The cell below takes about 30 SECONDS to run.**
```
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything
accuracy = get_accuracy(word_embeddings, data)
print(f"Accuracy is {accuracy:.2f}")
```
**Expected Output:**
$\approx$ 0.92
# 3.0 Plotting the vectors using PCA
Now you will explore the distance between word vectors after reducing their dimension.
The technique we will employ is known as [*principal component analysis* (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis). As we saw, we are working in a 300-dimensional space in this case. Although from a computational perspective we were able to perform a good job, it is impossible to visualize results in such high dimensional spaces.
You can think of PCA as a method that projects our vectors in a space of reduced dimension, while keeping the maximum information about the original vectors in their reduced counterparts. In this case, by *maximum infomation* we mean that the Euclidean distance between the original vectors and their projected siblings is minimal. Hence vectors that were originally close in the embeddings dictionary, will produce lower dimensional vectors that are still close to each other.
You will see that when you map out the words, similar words will be clustered next to each other. For example, the words 'sad', 'happy', 'joyful' all describe emotion and are supposed to be near each other when plotted. The words: 'oil', 'gas', and 'petroleum' all describe natural resources. Words like 'city', 'village', 'town' could be seen as synonyms and describe a similar thing.
Before plotting the words, you need to first be able to reduce each word vector with PCA into 2 dimensions and then plot it. The steps to compute PCA are as follows:
1. Mean normalize the data
2. Compute the covariance matrix of your data ($\Sigma$).
3. Compute the eigenvectors and the eigenvalues of your covariance matrix
4. Multiply the first K eigenvectors by your normalized data. The transformation should look something as follows:
<img src = 'word_embf.jpg' width="width" height="height" style="width:800px;height:200px;"/>
**Instructions**:
You will write a program that takes in a data set where each row corresponds to a word vector.
* The word vectors are of dimension 300.
* Use PCA to change the 300 dimensions to `n_components` dimensions.
* The new matrix should be of dimension `m, n_components`.
* First de-mean the data
* Get the eigenvalues using `linalg.eigh`. Use 'eigh' rather than 'eig' since R is symmetric. The performance gain when using eigh instead of eig is substantial.
* Sort the eigenvectors and eigenvalues by decreasing order of the eigenvalues.
* Get a subset of the eigenvectors (choose how many principle components you want to use using n_components).
* Return the new transformation of the data by multiplying the eigenvectors with the original data.
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html" > numpy.mean(a,axis=None) </a> : If you set <code>axis = 0</code>, you take the mean for each column. If you set <code>axis = 1</code>, you take the mean for each row. Remember that each row is a word vector, and the number of columns are the number of dimensions in a word vector. </li>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html" > numpy.cov(m, rowvar=True) </a>. This calculates the covariance matrix. By default <code>rowvar</code> is <code>True</code>. From the documentation: "If rowvar is True (default), then each row represents a variable, with observations in the columns." In our case, each row is a word vector observation, and each column is a feature (variable). </li>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html" > numpy.linalg.eigh(a, UPLO='L') </a> </li>
<li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html" > numpy.argsort </a> sorts the values in an array from smallest to largest, then returns the indices from this sort. </li>
<li>In order to reverse the order of a list, you can use: <code>x[::-1]</code>.</li>
<li>To apply the sorted indices to eigenvalues, you can use this format <code>x[indices_sorted]</code>.</li>
<li>When applying the sorted indices to eigen vectors, note that each column represents an eigenvector. In order to preserve the rows but sort on the columns, you can use this format <code>x[:,indices_sorted]</code></li>
<li>To transform the data using a subset of the most relevant principle components, take the matrix multiplication of the eigenvectors with the original data. </li>
<li>The data is of shape <code>(n_observations, n_features)</code>. </li>
<li>The subset of eigenvectors are in a matrix of shape <code>(n_features, n_components)</code>.</li>
<li>To multiply these together, take the transposes of both the eigenvectors <code>(n_components, n_features)</code> and the data (n_features, n_observations).</li>
<li>The product of these two has dimensions <code>(n_components,n_observations)</code>. Take its transpose to get the shape <code>(n_observations, n_components)</code>.</li>
</ul>
</p>
```
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def compute_pca(X, n_components=2):
    """Project each row of X onto its first `n_components` principal components.

    Args:
        X: array of shape (m, n); each row is one word vector.
        n_components: number of principal components to keep.

    Returns:
        Array of shape (m, n_components) holding the transformed data.
    """
    # center every feature (column) at zero mean
    X_centered = X - X.mean(axis=0)
    # covariance between features: the columns of X_centered are the variables
    cov_matrix = np.cov(X_centered, rowvar=False)
    # eigh is valid (and faster than eig) because the covariance matrix is symmetric
    eig_vals, eig_vecs = np.linalg.eigh(cov_matrix)
    # argsort is ascending; reverse the index order to put the largest
    # eigenvalues (most-variance directions) first
    order = np.argsort(eig_vals)[::-1]
    # columns of eig_vecs are eigenvectors; keep the leading n_components
    principal_axes = eig_vecs[:, order][:, :n_components]
    # project the centered data onto the principal axes
    return X_centered.dot(principal_axes)
# Testing your function
# Fixed seed makes the expected output below reproducible.
np.random.seed(1)
X = np.random.rand(3,10)
X_reduced = compute_pca(X, n_components=2)
print("Your original matrix was "+ str(X.shape) + " and it became:")
print(X_reduced)
```
**Expected Output:**
Your original matrix was: (3,10) and it became:
<table>
<tr>
<td>
0.43437323
</td>
<td>
0.49820384
</td>
</tr>
<tr>
<td>
0.42077249
</td>
<td>
-0.50351448
</td>
</tr>
<tr>
<td>
-0.85514571
</td>
<td>
0.00531064
</td>
</tr>
</table>
Now you will use your pca function to plot a few words we have chosen for you. You will see that similar words tend to be clustered near each other. Sometimes, even antonyms tend to be clustered near each other. Antonyms describe the same thing but just tend to be on the other end of the scale. They are usually found in the same location of a sentence, have the same parts of speech, and thus when learning the word vectors, you end up getting similar weights. In the next week we will go over how you learn them, but for now let's just enjoy using them.
**Instructions:** Run the cell below.
```
words = ['oil', 'gas', 'happy', 'sad', 'city', 'town', 'village', 'country', 'continent', 'petroleum', 'joyful']
# given a list of words and the embeddings, it returns a matrix with all the embeddings
X = get_vectors(word_embeddings, words)
print('You have 11 words each of 300 dimensions thus X.shape is:', X.shape)
# We have done the plotting for you. Just run this cell.
result= compute_pca(X, 2)
plt.scatter(result[:, 0], result[:, 1])
# label each point with its word, nudged slightly left so the text is readable
for i, word in enumerate(words):
    plt.annotate(word, xy=(result[i, 0]-0.1, result[i, 1]))
plt.show()
```
**What do you notice?**
The word vectors for gas, oil and petroleum appear related to each other, because their vectors are close to each other. Similarly, sad, joyful and happy all express emotions, and are also near each other.
| github_jupyter |
# Detecting and Classifying Toxic Comments
# Part 3-1: TF*IDF & Random Forest Classifiers
It may be possible to employ sequential binary models in order to get better results with rarer cases.
If we first classify Toxic and Not Toxic, we could further process only the Toxic results against models that had been trained only to recognise sub-classes of toxic models.
## Python Library Imports
```
import pandas as pd
import numpy as np
```
## Import spaCy
```
import spacy
from spacy.lang.en import English
# spaCy's built-in English stopword set (distinct from the nltk list used later)
spacy_stopwords = spacy.lang.en.stop_words.STOP_WORDS
from spacy.tokens import Doc
# import custom trained spaCy model
nlp = spacy.load("../models/spacy_2/")
```
## Import nltk
```
# nltk imports
import nltk
from nltk.corpus import stopwords
```
## Import Custom Functions
```
import sys
# add src folder to path
sys.path.insert(1, '../src')
# from text_prep import tidy_series, uppercase_proportion_column
# doc_check: helper for validating un-pickled spaCy docs (defined in ../src/spacy_helper.py)
from spacy_helper import doc_check
```
# Getting info from preserved spaCy docs
I've had a little difficulty with getting the doc properties to un-pickle and maintain the ability to further process them later. docs seem to depend on some vocab properties of the model that are not saved within the doc itself.
```
%%time
'''
CPU times: user 3min 44s, sys: 33.5 s, total: 4min 17s
Wall time: 4min 32s
'''
# Load the pre-pickled train/test splits produced by the earlier notebooks.
X_train = pd.read_pickle('../data/basic_df_split/X_train_2-1.pkl')
# load y_train
! ls ../data/basic_df_split/
y_train = pd.read_pickle('../data/basic_df_split/basic_y_train.pkl')
X_test = pd.read_pickle('../data/basic_df_split/basic_X_test.pkl')
y_test = pd.read_pickle('../data/basic_df_split/basic_y_test.pkl')
X_train.columns
```
### Create list of lemmas, less nltk stopwords
```
# nltk's English stopword list; set membership makes the filter O(1) per lemma
stopw_set = set(stopwords.words('english'))
%%time
# remove lemmas that appear in nltk stopword list
X_train['lemmas_less'] = X_train['lemmas'].apply(lambda row: [lemma for lemma in row if lemma not in stopw_set])
```
## Further reduce lemmas by min_length & max_length
Exploration of corpus vocabulary suggests that lemmas of 2 or fewer characters are likely not very useful and can be removed to reduce features.
Lemmas of longer than 20 characters are often run-on words (where spaces have been omitted). Although a few of them have words hidden within them that may be considered toxic, the rarity and non-standard format make them unlikely to be generalizable.
```
%%time
min_l = 3
max_l = 20
X_train['lemmas_less'] = X_train['lemmas'].apply(lambda row: [lemma for lemma in row if len(lemma) >= min_l or len(lemma) <= max_l])
```
# TF*IDF
## Scikit Learn Imports
```
from sklearn.feature_extraction.text import TfidfVectorizer
%%time
# Uni-, bi- and tri-grams; min_df=2 drops terms seen in fewer than two
# documents, which keeps the vocabulary (and the matrix below) manageable.
tfidf_sklearn = TfidfVectorizer(ngram_range = (1,3),
                                min_df = 2)
# return sparse matrix
# join list into individual strings
tfidf_values = tfidf_sklearn.fit_transform(X_train['lemmas_less'].apply(lambda x: " ".join(x)))
'''
last values (with min_df as 1):
<106912x4089577 sparse matrix of type '<class 'numpy.float64'>'
with 8002628 stored elements in Compressed Sparse Row format>
'''
tfidf_values
%%time
'''
CPU times: user 11.9 s, sys: 8.27 s, total: 20.2 s
Wall time: 23 s
'''
# NOTE(review): .toarray() densifies the full TF*IDF matrix; with ~100k rows
# this can exhaust memory — consider keeping the sparse matrix instead.
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2 in
# favor of get_feature_names_out() — confirm the pinned sklearn version.
lemmas_less_tfidf = pd.DataFrame(tfidf_values.toarray(),
                                 columns=tfidf_sklearn.get_feature_names())
lemmas_less_tfidf.shape
y_train['toxic'].shape
# sum(lemmas_less_tfidf['jerk'])
# search_term = 'jerk'
# # running this portion will crash the kernel
# bool_mask = lemmas_less_tfidf.sort_values(search_term, ascending=False)[search_term][:10]
# bool_mask
```
# Toxic: Random Forest Classifier
Resources:
- [Explanation of Warm Start for RFC (not what you may think)](https://stackoverflow.com/questions/42757892/how-to-use-warm-start/42763502)
- [TfidfVectorizer Docs](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer)
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
# warm_start=True lets repeated .fit() calls continue training
# (see the batch_train helper below).
toxic_rfc = RandomForestClassifier(n_estimators=100,
                                   max_depth = 10,
                                   oob_score=True,
                                   n_jobs=-1,
                                   random_state=42,
                                   warm_start=True)
X = lemmas_less_tfidf
y = y_train
toxic_logistic = LogisticRegression(warm_start=True,
                                    random_state=42,
                                    verbose=True,
                                    solver='sag',
                                    multi_class='ovr',
                                    max_iter=100,
                                    n_jobs=-1)
y.columns
%%time
# Smoke test: fit on the first 10 rows only.
toxic_logistic.fit(X[:10],y['obscene'][:10])
def batch_train(X, y, model, batch_size=1000, verbose=False, start=0):
    """Fit `model` on consecutive slices of X / y of at most `batch_size` rows.

    Intended for estimators created with warm_start=True, where repeated
    fit() calls continue training. (NOTE(review): for scikit-learn
    estimators, partial_fit — where available — is the canonical
    incremental-training API.)

    Args:
        X: feature rows (anything sliceable and supporting len()).
        y: labels aligned with X.
        model: estimator exposing fit(X, y).
        batch_size: maximum number of rows per fit() call.
        verbose: when True, print the row range trained in each step.
        start: index of the first row to train on.
    """
    total = len(X)
    b = start
    # BUG FIX: the original loop never advanced b/e (and assigned the clamp to
    # a misspelled `bach_size`), so it refit the same first slice forever and
    # skipped any final partial batch. min() handles both the clamp and the
    # trailing partial batch.
    while b < total:
        e = min(b + batch_size, total)
        model.fit(X[b:e], y[b:e])
        if verbose:
            print(f'trained rows {b}:{e}')
        b = e
#
# naive_toxic = BernoulliNB
# print(vect_X_train.shape)
# print(y_train.shape)
# print(X_train.shape)
# vect_X_train.iloc[0]
# naive_toxic.fit(vect_X_train, y_train)
%load_ext autoreload
%autoreload 2
import sys
# add src folder to path
sys.path.insert(1, '../tokens')
# from text_prep import tidy_series, uppercase_proportion_column
# NOTE(review): importing API secrets from a module keeps them out of the
# notebook itself, but make sure ../tokens is git-ignored.
from twitter_tok import twitter_api_key, api_secret_key, bearer_token, eml, password
import requests
# Scratch experiment (kept for reference): capture print output into a list.
# import sys
# class ListStream:
#     def __init__(self):
#         self.data = []
#     def write(self, s):
#         self.data.append(s)
# sys.stdout = x = ListStream()
# for i in range(2):
#     print ('i = ', i)
# sys.stdout = sys.__stdout__
# print(x.data)
```
https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/master/Sampled-Stream/sampled-stream.py
https://developer.twitter.com/en/docs/twitter-api/tweets/sampled-stream/introduction
```
%load_ext autoreload
%autoreload 2
sys.path.insert(1, '../tokens')
from twitter_stream import *
import requests
import os
import json
import os
import tweepy as tw
import pandas as pd
```
Had little luck with get old tweets
https://pypi.org/project/GetOldTweets3/
```
import twitter_rules
# twitter_rules.main()
# Collect a small sample (10 tweets) into list_ via the filtered-stream rules.
list_ = list()
twitter_rules.main(text_list=list_, total_tweets=10)
print(len(list_))
list_
```
Resources:
- [Building Twitter Rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/integrate/build-a-rule)
| github_jupyter |
```
import argparse
import json
import mxnet as mx
import tensorflow as tf
from tensorflow.python.framework import graph_util
import numpy as np
from converter import Converter
import os
import time
%reload_ext autoreload
%autoreload 2
def main(model_prefix, output_prefix, input_h=128, input_w = 128):
    """Rebuild the MxNet model at `model_prefix` as a TensorFlow (1.x) graph
    and save it as a checkpoint under ./checkpoint.

    NOTE(review): `output_prefix` is unused — the checkpoint name below is
    hard-coded to "mnet.25-<h>x<w>".
    NOTE(review): this code uses TF1 APIs (tf.Session, tf.placeholder,
    tf.get_variable); it will not run under TF2 without tf.compat.v1.

    Args:
        model_prefix: path prefix of the MxNet model; '<prefix>-symbol.json'
            and epoch-0 params must exist.
        output_prefix: unused.
        input_h: input image height for the placeholder.
        input_w: input image width for the placeholder.
    """
    # Parsing JSON is easier because it contains operator name
    js_model = json.load(open(model_prefix + '-symbol.json', 'r'))
    mx_model, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
    # merge learned weights and auxiliary state into one lookup table
    params = arg_params
    params.update(aux_params)
    tf.reset_default_graph()
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    a = 0  # counts 'null' nodes (parameters, not real ops)
    with tf.Session(config=config) as sess:
        tf_nodes = dict()
        # Workaround for input node
        input_data = tf.placeholder('float32', (1, input_h, input_w, 3), name='data')
        tf_nodes['data'] = input_data
        nodes = js_model['nodes']
        conv = Converter(tf_nodes, nodes, params)
        # Translate each MxNet symbol node into its TensorFlow equivalent.
        for node_idx, node in enumerate(nodes):
            op = node['op']
            print('Parsing node %s with operator %s and index %d' % (node['name'], op, node_idx))
            # Hack for older versions of MxNet
            if 'param' in node:
                node['attrs'] = node['param']
            if op == 'BatchNorm':
                conv.create_bn(node)
            elif op == 'elemwise_add' or op == '_Plus':
                conv.create_elementwise(node)
            elif op == 'Activation':
                conv.create_activation(node)
            elif op == 'SoftmaxOutput':
                conv.create_softmax(node)
            elif op == 'Convolution':
                conv.create_conv(node)
            elif op == 'Pooling':
                conv.create_pooling(node)
            elif op == 'Flatten':
                conv.create_flatten(node)
            elif op == 'FullyConnected':
                conv.create_fc(node)
            elif op == 'L2Normalization':
                conv.create_norm(node)
            elif op == "Concat":
                conv.create_concat(node)
            elif op == "Reshape":
                conv.create_reshape(node)
            elif op == "Crop":
                conv.create_crop(node)
            elif op == "UpSampling":
                conv.create_upsampling(node)
                #conv.create_upsampling_v2(node)
            elif op == "SoftmaxActivation":
                conv.create_softmaxactivation(node)
            elif op == 'null':
                #print("this is just a param, do not create op")
                a += 1
            else:
                print("------------------unsupported op!!!----------------------")
        #print("null nodes:", a)
        #test run
        # The graph heads listed in the symbol JSON are the network outputs.
        output_node_names = []
        for i in range(len(js_model['heads'])):
            output_node_names.append(nodes[js_model['heads'][i][0]]['name'])
        print("output node names:", output_node_names)
        output_nodes = []
        print("------------------------------Output nodes:--------------------------")
        for i in range(len(output_node_names)):
            print(tf_nodes[output_node_names[i]])
            output_nodes.append(tf_nodes[output_node_names[i]])
        # test define my own conv layer after
        # Re-apply the bbox / landmark prediction convolutions for each FPN
        # stride (32, 16, 8), reusing the variables created during conversion
        # via an AUTO_REUSE variable scope.
        output_nodes_rev = []
        #################### stride 32 ################################
        output_nodes_rev.append(output_nodes[0])
        tmp = output_nodes[3]
        with tf.variable_scope("", reuse=tf.AUTO_REUSE):
            bbox_stride32_w = tf.get_variable("face_rpn_bbox_pred_stride32_weight")
            bbox_stride32_b = tf.get_variable("face_rpn_bbox_pred_stride32_bias")
            landmark_stride32_w = tf.get_variable("face_rpn_landmark_pred_stride32_weight")
            landmark_stride32_b = tf.get_variable("face_rpn_landmark_pred_stride32_bias")
            #print("shape: ", bbox_stride32_w.shape, bbox_stride32_b.shape)
            tmp1 = tf.nn.conv2d(tmp, bbox_stride32_w, [1, 1, 1, 1], padding='VALID')
            tmp1 = tf.add(tmp1, bbox_stride32_b, name = "face_rpn_bbox_pred_stride32_rev")
            tmp2 = tf.nn.conv2d(tmp, landmark_stride32_w, [1, 1, 1, 1], padding='VALID')
            tmp2 = tf.add(tmp2, landmark_stride32_b, name = "face_rpn_landmark_pred_stride32_rev")
        output_nodes_rev.append(tmp1)
        output_nodes_rev.append(tmp2)
        #################### stride 16 ################################
        output_nodes_rev.append(output_nodes[1])
        tmp = output_nodes[4]
        with tf.variable_scope("", reuse=tf.AUTO_REUSE):
            bbox_stride16_w = tf.get_variable("face_rpn_bbox_pred_stride16_weight")
            bbox_stride16_b = tf.get_variable("face_rpn_bbox_pred_stride16_bias")
            landmark_stride16_w = tf.get_variable("face_rpn_landmark_pred_stride16_weight")
            landmark_stride16_b = tf.get_variable("face_rpn_landmark_pred_stride16_bias")
            tmp1 = tf.nn.conv2d(tmp, bbox_stride16_w, [1, 1, 1, 1], padding='VALID')
            tmp1 = tf.add(tmp1, bbox_stride16_b, name = "face_rpn_bbox_pred_stride16_rev")
            tmp2 = tf.nn.conv2d(tmp, landmark_stride16_w, [1, 1, 1, 1], padding='VALID')
            tmp2 = tf.add(tmp2, landmark_stride16_b, name = "face_rpn_landmark_pred_stride16_rev")
        output_nodes_rev.append(tmp1)
        output_nodes_rev.append(tmp2)
        #################### stride 8 ################################
        output_nodes_rev.append(output_nodes[2])
        tmp = output_nodes[5]
        with tf.variable_scope("", reuse=tf.AUTO_REUSE):
            bbox_stride8_w = tf.get_variable("face_rpn_bbox_pred_stride8_weight")
            bbox_stride8_b = tf.get_variable("face_rpn_bbox_pred_stride8_bias")
            landmark_stride8_w = tf.get_variable("face_rpn_landmark_pred_stride8_weight")
            landmark_stride8_b = tf.get_variable("face_rpn_landmark_pred_stride8_bias")
            tmp1 = tf.nn.conv2d(tmp, bbox_stride8_w, [1, 1, 1, 1], padding='VALID')
            tmp1 = tf.add(tmp1, bbox_stride8_b, name = "face_rpn_bbox_pred_stride8_rev")
            tmp2 = tf.nn.conv2d(tmp, landmark_stride8_w, [1, 1, 1, 1], padding='VALID')
            tmp2 = tf.add(tmp2, landmark_stride8_b, name = "face_rpn_landmark_pred_stride8_rev")
        output_nodes_rev.append(tmp1)
        output_nodes_rev.append(tmp2)
        print("------------------------output nodes rev:----------------------")
        for node in output_nodes_rev:
            print(node)
        print("---------------------test inference-------------------")
        # Run a few dummy inferences to sanity-check the graph and time it.
        for i in range(5):
            start = time.time()
            out = sess.run(output_nodes_rev, feed_dict = {input_data: np.zeros([1,input_h,input_w,3])})
            end = time.time()
            print("inference time: %.6f" %(end-start))
        for i in range(len(out)):
            print("output node shape:", out[i].transpose(0,3,1,2).shape)
        #'''
        # Save every global variable so the converted weights can be restored.
        g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        print("number of variables (trainable & global):", len(g_vars))
        saver = tf.train.Saver(g_vars)
        saver.save(sess, os.path.join("./checkpoint", "mnet.25-%dx%d"%(input_h, input_w)), write_meta_graph=True)
# Convert the MxNet RetinaFace model to a TF checkpoint at 1080x1920 input.
mx_prefix = "models/mnet.25"
tf_prefix = "xxx"  # NOTE(review): output_prefix is unused inside main(); the checkpoint path is hard-coded
input_h = 1080
input_w = 1920
main(mx_prefix, tf_prefix, input_h, input_w)
```
| github_jupyter |
<h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
```
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
```
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
```
def download(url, file):
    """
    Download file from <url>
    :param url: URL to file
    :param file: Local file path
    """
    # Skip the network round-trip when the file already exists locally.
    if os.path.isfile(file):
        return
    print('Downloading ' + file + '...')
    urlretrieve(url, file)
    print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted: compare against known-good MD5 digests.
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
    'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
    'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file
    :param file: The zip file to extract the data from
    :return: (features, labels) as numpy arrays; one flattened float32 image
             per row, one single-character label per image
    """
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # tqdm wraps the archive's name list so extraction progress is visible.
        for filename in tqdm(zipf.namelist(), unit='files'):
            # Directory entries end with '/'; only real files hold images.
            if filename.endswith('/'):
                continue
            with zipf.open(filename) as image_file:
                image = Image.open(image_file)
                image.load()
            # Flatten pixels to 1-D; float32 keeps memory usage down.
            features.append(np.array(image, dtype=np.float32).flatten())
            # The first character of the file name encodes the letter label.
            labels.append(os.path.split(filename)[1][0])
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
# resample() shuffles and subsamples features and labels in lockstep.
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
```
<img src="image/mean_variance.png" style="height: 75%;width: 75%; position: relative; right: 5%">
## Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the `normalize()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
```
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized (raw grayscale, 0-255)
    :return: Normalized image data

    Implements X' = a + (X - X_min) * (b - a) / (X_max - X_min) with
    a=0.1, b=0.9 and the known grayscale range [0, 255].
    """
    a = 0.1
    b = 0.9
    grayscale_min = 0
    grayscale_max = 255
    return a + ((image_data - grayscale_min) * (b - a)) / (grayscale_max - grayscale_min)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases: spot-check the scaling at 0, small values, and 255.
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
    [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
     0.125098039216, 0.128235294118, 0.13137254902, 0.9],
    decimal=3)
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
    [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
     0.896862745098, 0.9])
if not is_features_normal:
    # Normalize once; the flag guards against double-scaling on cell re-run.
    train_features = normalize_grayscale(train_features)
    test_features = normalize_grayscale(test_features)
    is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True  # guard against re-encoding on cell re-run
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation (5% held out, fixed seed
# so the split is reproducible across runs).
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features,
    train_labels,
    test_size=0.05,
    random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access; skipped when the cache file already exists.
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        with open('notMNIST.pickle', 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
print('Data cached in pickle file.')
```
# Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
```
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data cached by the preprocessing cells above.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    pickle_data = pickle.load(f)
    train_features = pickle_data['train_dataset']
    train_labels = pickle_data['train_labels']
    valid_features = pickle_data['valid_dataset']
    valid_labels = pickle_data['valid_labels']
    test_features = pickle_data['test_dataset']
    test_labels = pickle_data['test_labels']
    del pickle_data  # Free up memory
print('Data and modules loaded.')
```
<img src="image/weight_biases.png" style="height: 60%;width: 60%; position: relative; right: 10%">
## Problem 2
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/api_docs/python/tf/dtypes/DType">float32</a> tensors:
- `features`
- Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`)
- `labels`
- Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`)
- `weights`
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/tf/random/truncated_normal">`tf.truncated_normal()` documentation</a> for help.
- `biases`
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/tf/zeros"> `tf.zeros()` documentation</a> for help.
*If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
```
features_count = 784  # 28x28 grayscale pixels per flattened image
labels_count = 10     # one class per letter A-J
# Placeholders accept any batch size (None) of flattened images / one-hot labels.
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# Weights start from a truncated normal distribution; biases start at zero,
# matching the shapes the assertion cell below expects: (784, 10) and (10,).
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases: verify the tensors above have the required kinds, shapes, dtypes.
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
    features._shape.dims[0].value is None and\
    features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
    labels._shape.dims[0].value is None and\
    labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1)
# some students have encountered challenges using this function, and have resolved issues
# using https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits
# please see this thread for more detail https://discussions.udacity.com/t/accuracy-0-10-in-the-intro-to-tensorflow-lab/272469/9
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases: run the loss once per split to prove the graph is wired correctly.
with tf.Session() as session:
    session.run(init)
    session.run(loss, feed_dict=train_feed_dict)
    session.run(loss, feed_dict=valid_feed_dict)
    session.run(loss, feed_dict=test_feed_dict)
    biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
```
<img src="image/learn_rate_tune.png" style="height: 60%;width: 60%">
## Problem 3
Below are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* **Epochs:** 1
* **Batch Size:**
* 2000
* 1000
* 500
* 300
* 50
* **Learning Rate:** 0.01
Configuration 2
* **Epochs:** 1
* **Batch Size:** 100
* **Learning Rate:**
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
Configuration 3
* **Epochs:**
* 1
* 2
* 3
* 4
* 5
* **Batch Size:** 100
* **Learning Rate:** 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
*If you're having trouble solving problem 3, you can view the solution [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
```
# Problem 3 parameters. These were left as TODOs, which made the cell below
# fail with NameError; set to the strongest options from the configurations
# listed above (smallest batch, moderate rate, most epochs).
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements used for graphing loss and accuracy every `log_batch_step` batches
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
# Train for `epochs` passes over the shuffled training set, logging loss and
# train/validation accuracy every `log_batch_step` batches for plotting below.
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer and get loss
            _, l = session.run(
                [optimizer, loss],
                feed_dict={features: batch_features, labels: batch_labels})
            # Log every 50 batches
            if not batch_i % log_batch_step:
                # Calculate Training and Validation accuracy
                training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
                validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
                # Log batches
                previous_batch = batches[-1] if batches else 0
                batches.append(log_batch_step + previous_batch)
                loss_batch.append(l)
                train_acc_batch.append(training_accuracy)
                valid_acc_batch.append(validation_accuracy)
    # Check accuracy against Validation data
    validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Plot loss (top) and train/validation accuracy (bottom) over logged batches.
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
```
## Test
Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
```
# Parameters carried over from Problem 3. These were left as TODOs, which made
# this cell fail with NameError before anything ran.
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
# Retrain from scratch, then evaluate once on the held-out test set.
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer
            _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
    # Check accuracy against Test data
    test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
```
# Multiple layers
Good job! You built a one layer TensorFlow network! However, you want to build more than one layer. This is deep learning after all! In the next section, you will start to satisfy your need for more layers.
| github_jupyter |
```
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import cv2
import pickle
import dlib
import face_recognition as fr
import matplotlib.pyplot as plt
import re
import glob
import json
from urllib.request import urlopen
import time
import os
```
3 ~ 17번까지 한국 연예인
# 연예인 이름 목록
### 목록 크롤링
```
# Open the Namu wiki page that lists celebrities' stage names and real names.
driver = webdriver.Chrome()
name_list_url = 'https://namu.wiki/w/%EC%97%B0%EC%98%88%EC%9D%B8/%EB%B3%B8%EB%AA%85%20%EB%B0%8F%20%EC%98%88%EB%AA%85'
driver.get(name_list_url)
def preprocess(name):
    """Drop any parenthesized annotation from a name and trim whitespace."""
    without_parens = re.sub('\(.+\)', '', name)
    return without_parens.strip()
name_list = []
error_list = []
# Per the note above, sections 3-17 of the page hold Korean celebrities.
for i in range(3,18):
    class_ = driver.find_elements_by_class_name('wiki-heading-content')[i]
    raw_name_list = class_.find_elements_by_css_selector('ul.wiki-list li')
    for raw_name in raw_name_list:
        # Entries look like "stage name → real name".
        split_name = raw_name.text.split('→')
        stage_name = preprocess(split_name[0])
        try:
            # First run of Hangul characters after the arrow is the real name.
            real_name = re.search('[가-힣]+', split_name[1])[0].strip()
        except Exception as e:
            # Irregularly formatted rows are collected for manual fixing below.
            error_list.append(raw_name.text)
            continue
        new_name = stage_name + ' ' + real_name
        name_list.append(new_name)
print(f'{len(name_list)} persons returned')
print(f'{len(error_list)} persons got error')
print(error_list)
```
### 전처리
오타로 인해 다른 이름과 다르게 표시가 된 이름들 수정
```
# Manual fix-ups for the three rows that failed the generic parse above.
# Row 0 used '-' instead of '→' as the separator.
stage_name = preprocess(error_list[0].split('-')[0])
real_name = re.search('[가-힣]+', error_list[0].split('-')[1])[0]
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
# Row 1: real name may contain Latin letters and spaces.
stage_name = preprocess(error_list[1].split('→')[0])
real_name = re.search('[가-힣A-Za-z ]+', error_list[1].split('→')[1])[0].strip()
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
# Row 2: multiple arrows; take the last segment as the real name.
stage_name = preprocess(error_list[2].split('→')[0])
real_name = re.search('[가-힣A-Za-z ]+', error_list[2].split('→')[-1])[0].strip()
print(stage_name, real_name)
name_list.append(stage_name + ' ' + real_name)
```
### 목록 이름 저장
```
# NOTE(review): drops the first 6 entries — presumably non-person rows from
# the page header; confirm the offset against the crawled output.
name_list = name_list[6:]
with open('celebrity/celebrity_name.pkl', 'wb') as f:
    pickle.dump(name_list, f)
# Round-trip to verify the pickle is readable and count the names.
with open('celebrity/celebrity_name.pkl', 'rb') as f:
    celebrity_name_list = pickle.load(f)
len(celebrity_name_list)
```
# 얼굴 크롤링 간 조건
다양한 얼굴 중 임베딩에 가장 적합한 이미지를 크롤링하기위해 조건 설정
- 얼굴 탐지 및 Embedding이 가능한 이미지
- 썬글라스 착용 시 제외
- 마스크 착용 시 제외
- 얼굴 각도가 좌,우로 돌아간 경우 제외
- 이미지 크기가 340x340보다 작으면 제외
- 제외되는 경우가 너무 많아서 크기 수정
### 썬글라스 탐지
눈 주위의 픽셀값을 이용해 썬글라스 착용 여부 탐지
- 왼쪽 : Landmark 36 ~ 41
- 오른쪽 : Landmakr 42 ~ 47
- luminance를 이용해 밝기 측정
```
# Visual sanity check: draw every detected facial landmark on a sample image.
example = fr.load_image_file('celebrity/GRAY 이성화.jpg')
example_show = example.copy()
landmarks = fr.face_landmarks(example)
# NOTE(review): the assignment above is immediately discarded by the rebind below.
landmarks = []
for k, v in fr.face_landmarks(example)[0].items():
    landmarks.extend(v)
for number, landmark in enumerate(landmarks):
    cv2.circle(example_show, landmark, 2, (0, 255, 255), -1)
plt.imshow(example_show)
def get_brightness_around_eye(image):
    """Estimate brightness around both eyes to detect sunglasses.

    Uses face_recognition landmarks 36-41 (left eye) and 42-47 (right eye),
    blanks out the eyeball itself, and returns the mean luminance of a margin
    around each eye as a (left, right) tuple. Returns None when landmark
    extraction or the masking step fails.
    """
    try:
        landmarks = []
        for k, v in fr.face_landmarks(image)[0].items():
            landmarks.extend(v)
        left1, right1 = landmarks[36][0], landmarks[39][0]
        top1, bottom1 = landmarks[37][1], landmarks[41][1]
        left2, right2 = landmarks[42][0], landmarks[45][0]
        top2, bottom2 = landmarks[43][1], landmarks[47][1]
        # NOTE(review): writing np.nan into `image` assumes a float dtype, but
        # fr.load_image_file returns uint8 — on uint8 this raises and the
        # function returns None via the handler below. Confirm intent.
        image[top1:bottom1, left1:right1] = np.nan
        image[top2:bottom2, left2:right2] = np.nan
        left_glass = image[top1-10:bottom1+10, left1-5:right1+5]
        right_glass = image[top2-10:bottom2+10, left2-5:right2+5]
        # Rec.709-style weighted luminance, ignoring the NaN-masked pixels.
        luminance_left = np.nanmean(0.2126*left_glass[:,:,0] + 0.7152*left_glass[:,:,1] + 0.0722*left_glass[:,:,2])
        luminance_right = np.nanmean(0.2126*right_glass[:,:,0] + 0.7152*right_glass[:,:,1] + 0.0722*right_glass[:,:,2])
        return luminance_left, luminance_right
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit escape.
        return None
```
### 마스크 착용 탐지
다양한 얼굴 중 마스크 착용 시 제외
- 입 주변의 픽셀값을 이용해 마스크 착용 여부 탐지
- Lanmark 2, 6, 11, 14 사용
```
# Landmarks 2, 6, 11 and 14 bound the mouth/chin region.
def get_brightness_around_mouse(image):
    """Mean weighted luminance over the mouth/chin box; the crawl loop treats
    high values (bright fabric) as a probable face mask."""
    pts = []
    for _, coords in fr.face_landmarks(image)[0].items():
        pts.extend(coords)
    left_x, right_x = pts[6][0], pts[11][0]
    top_y, bottom_y = pts[2][1], pts[6][1]
    region = image[top_y:bottom_y, left_x:right_x]
    return np.nanmean(0.2126*region[:,:,0] + 0.7152*region[:,:,1] + 0.0722*region[:,:,2])
```
### 얼굴 각도 측정
얼굴이 좌,우로 돌아간 정도를 측정해 정면이 아닐 시 제외
- 왼쪽과 오른쪽 볼의 길이차를 이용해 회전 정도 측정
- Landmark 2, 14, 30 사용
```
def ratio_of_face_rotate(image):
    """Return a cheek-symmetry ratio in (0, 1]; values near 1 mean the face
    is close to frontal. Compares nose-to-left-cheek vs nose-to-right-cheek
    distances (landmarks 30, 2 and 14)."""
    pts = []
    for _, coords in fr.face_landmarks(image)[0].items():
        pts.extend(coords)
    nose = np.array(pts[30])
    dist_left = np.linalg.norm(nose - np.array(pts[2]))
    dist_right = np.linalg.norm(nose - np.array(pts[14]))
    return min(dist_left, dist_right) / max(dist_left, dist_right)
```
### 이미지 사이즈 조건
행 : 404.16
열 : 340.0
# 이미지 크롤링
크롤링 간 사용하는 유용함 함수는 따로 정의
### 이미지 저장
사이트로부터 얻은 이미지의 URL을 이용해 이미지 다운로드
```
def save_image_from_url(image_url):
    """Download an <img> element's src and save it to celebrity/<name>.jpg.

    NOTE(review): relies on the module-global `name` set by the crawl loop —
    confirm, and consider passing it explicitly. Also returns `file_name`
    after its `with` block has closed it; callers only use `file_name.name`,
    but the handle itself is no longer writable.
    """
    with urlopen(image_url.get_attribute('src')) as f:
        with open(f'celebrity/{name}.jpg', 'wb') as file_name:
            img = f.read()
            file_name.write(img)
    return img, file_name
```
### 이미지 다운로드
```
# For each celebrity, scan up to 50 Naver image-search results and keep the
# first image passing every quality gate (size, single face, frontal pose,
# no sunglasses, no mask). Names with no acceptable image go to error_name.
delay=2
error_name = []
driver = webdriver.Chrome()
for name in name_list:
    driver.get(f'https://search.naver.com/search.naver?where=image&sm=tab_jum&query={name}')
    time.sleep(0.2)
    try:
        myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.TAG_NAME, 'section')))
        driver.find_element_by_tag_name('html').send_keys(Keys.END)
        image_url_list = driver.find_elements_by_css_selector('div.tile_item._item img._image._listImage')[:50]
        is_succeed = 0
        for image_url in image_url_list:
            img, file_name = save_image_from_url(image_url)
            error_message = ''
            try:
                image = fr.load_image_file(file_name.name)
                if (image.shape[0] < 200) or (image.shape[1] < 200):
                    print(name, 'Too Small')
                    continue
                locations = fr.face_locations(image)
                if len(locations) != 1:
                    print(name, 'No Face')
                    continue
                top, right, bottom, left = locations[0]
                face_cropped = image[top:bottom, left:right]
                if (face_cropped.shape[0] < 30) or (face_cropped.shape[1] < 30):
                    print(name, 'Cropped Too Small')
                    continue
                face_embedding = fr.face_encodings(face_cropped)
                if len(face_embedding) != 1:
                    print(name, 'No Embedding')
                    continue
                ratio = ratio_of_face_rotate(image)
                if ratio < 0.85:
                    print(name, 'Rotated')
                    continue
                left_eye, right_eye = get_brightness_around_eye(image)
                if (left_eye < 60) & (right_eye < 60):
                    print(name, 'Maybe Sunglasses')
                    continue
                mask = get_brightness_around_mouse(image)
                if mask > 220:
                    print(name, 'Maybe Mask?')
                    # BUG FIX: this check previously fell through and accepted
                    # the image anyway; reject it like every other gate.
                    continue
                is_succeed = 1
                print(name, 'Succeed')
                break
            except Exception as ex:
                # Any detection failure (including None from the eye check)
                # moves on to the next candidate image.
                print(name, 'failed detection', ex)
                continue
        if is_succeed == 0:
            os.remove(file_name.name)
            error_name.append(name)
            print(name, 'collecting Failed')
    except Exception as ex:
        print(name, ex)
        error_name.append(name)
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
# Demo 1: plotting a pair of 2x3 arrays draws one dot per (x, y) pair.
x = np.array([[0, 1, 2], [0, 1, 2]])
y = np.array([[0, 0, 0], [1, 1, 1]])
plt.plot(x, y, color='red', marker='.', linestyle='')
plt.grid(True)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Demo 2: meshgrid over linspace produces a full grid of dots.
x = np.linspace(0, 100, 100)
y = np.linspace(0, 100, 100)
X, Y = np.meshgrid(x, y)
plt.plot(X, Y, color='red', marker='.', linestyle='')
plt.grid(True)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Demo 3: same grid with a fine step, drawn as connected lines.
x = np.arange(1, 10, 0.02)
y = np.arange(1, 10, 0.02)
X, Y = np.meshgrid(x, y)
plt.plot(X, Y, color='red')
plt.grid(True)
plt.show()
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
def plot_decision_regions(X_train, y_train, X_test, y_test, classifier):
    """Plot a fitted classifier's 2-D decision regions with the data overlaid.

    :param X_train, y_train: training samples of shape (n, 2) and their labels
    :param X_test, y_test: test samples; drawn as large hollow circles
    :param classifier: fitted estimator exposing .predict
    """
    X = np.concatenate((X_train, X_test))
    y = np.concatenate((y_train, y_test))
    # One marker and one color per class label.
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluate the classifier on a fine grid covering the data plus a margin.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    XX, YY = np.meshgrid(np.arange(x1_min, x1_max, 0.02),
                         np.arange(x2_min, x2_max, 0.02))
    z = classifier.predict(np.array([XX.ravel(), YY.ravel()]).T)
    Z = z.reshape(XX.shape)
    plt.contourf(XX, YY, Z, alpha=0.3, cmap=cmap)
    plt.xlim(XX.min(), XX.max())
    plt.ylim(YY.min(), YY.max())
    # Training points, colored by class.
    for i, c in enumerate(np.unique(y_train)):
        plt.scatter(x=X_train[y_train==c, 0],
                    y=X_train[y_train==c, 1],
                    alpha=0.8,
                    c=colors[i],
                    marker=markers[i],
                    label=c,
                    edgecolor='black')
    # Test points as hollow circles.
    # FIX: c='' is rejected by modern matplotlib; facecolors='none' is the
    # supported way to draw unfilled markers.
    plt.scatter(X_test[:, 0],
                X_test[:, 1],
                facecolors='none',
                edgecolor='black',
                alpha=1.0,
                linewidths=1,
                marker='o',
                s=100,
                label='test set')
    plt.xlabel('petal length [standardized]')
    plt.ylabel('petal width [standardized]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
import pandas as pd
# Wine dataset: column 0 is the class label; use columns 1-2 as 2-D features
# so the decision regions can be plotted.
df = pd.read_csv('./wine.data', header=None)
X, y = df.iloc[:, 1:3].values, df.iloc[:, 0].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
from sklearn.neighbors import KNeighborsClassifier
knc = KNeighborsClassifier()
knc.fit(X_train, y_train)
plot_decision_regions(X_train, y_train, X_test, y_test, knc)
from sklearn import datasets
# Iris dataset: columns 2-3 are petal length/width — the labels used by
# plot_decision_regions match this choice.
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
# Standardize with statistics from the training split only.
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(X_train)
X_train_std = ss.transform(X_train)
X_test_std = ss.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knc = KNeighborsClassifier()
knc.fit(X_train_std, y_train)
print('score: ', knc.score(X_test_std, y_test))
plot_decision_regions(X_train_std, y_train, X_test_std, y_test, knc)
```
| github_jupyter |
# Using SageMaker Neo to Compile a Tensorflow U-Net Model
[SageMaker Neo](https://aws.amazon.com/sagemaker/neo/) makes it easy to compile pre-trained TensorFlow models and build an inference optimized container without the need for any custom model serving or inference code.
<img src="https://paperswithcode.com/media/methods/Screen_Shot_2020-07-07_at_9.08.00_PM_rpNArED.png" align="center" style="padding: 8px;width:500px;">
[U-Net](https://paperswithcode.com/method/u-net) is an architecture for semantic segmentation. It's a popular model for biological images including Ultrasound, Microscopy, CT, MRI and more.
In this example, we will show how to deploy a pre-trained U-Net model to a SageMaker Endpoint with Neo compilation using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk), and then use the model to perform inference requests. We also provide a performance comparison so you can see the benefits of model compilation.
## Setup
First, we need to ensure we have SageMaker Python SDK 1.x and Tensorflow 1.15.x. Then, import necessary Python packages.
```
!pip install -U --quiet --upgrade "sagemaker"
!pip install -U --quiet "tensorflow==1.15.3"
import tarfile
import numpy as np
import sagemaker
import time
from sagemaker.utils import name_from_base
```
Next, we'll get the IAM execution role and a few other SageMaker specific variables from our notebook environment, so that SageMaker can access resources in your AWS account later in the example.
```
from sagemaker import get_execution_role
from sagemaker.session import Session
# Resolve the notebook's IAM role plus the region and default S3 bucket
# SageMaker will use for model artifacts.
role = get_execution_role()
sess = Session()
region = sess.boto_region_name
bucket = sess.default_bucket()
```
SageMaker [Neo supports Tensorflow 1.15.x](https://docs.amazonaws.cn/en_us/sagemaker/latest/dg/neo-supported-cloud.html). Check your version of Tensorflow to prevent downstream framework errors.
```
import tensorflow as tf
# Per the note above, Neo compilation here targets TF 1.15.x — verify first.
print(tf.__version__) # This notebook runs on TensorFlow 1.15.x or earlier
```
## Download U-Net Model
The SageMaker Neo TensorFlow Serving Container works with any model stored in TensorFlow's [SavedModel format](https://www.tensorflow.org/guide/saved_model). This could be the output of your own training job or a model trained elsewhere. For this example, we will use a pre-trained version of the U-Net model based on this [repo](https://github.com/kamalkraj/DATA-SCIENCE-BOWL-2018).
```
model_name = 'unet_medical'
export_path = 'export'  # directory inside the archive holding the SavedModel
model_archive_name = 'unet-medical.tar.gz'
model_archive_url = 'https://sagemaker-neo-artifacts.s3.us-east-2.amazonaws.com/{}'.format(model_archive_name)
!wget {model_archive_url}
```
The pre-trained model and its artifacts are saved in a compressed tar file (.tar.gz) so unzip first with:
```
!tar -xvzf unet-medical.tar.gz
```
After downloading the model, we can inspect it using TensorFlow's ``saved_model_cli`` command. In the command output, you should see
```
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['serving_default']:
...
```
The command output should also show details of the model inputs and outputs.
```
import os
# Inspect the SavedModel's signatures, inputs and outputs with the TF CLI.
model_path = os.path.join(export_path, 'Servo/1')
!saved_model_cli show --all --dir {model_path}
```
Next we need to create a model archive file containing the exported model.
## Upload the model archive file to S3
We now have a suitable model archive ready in our notebook. We need to upload it to S3 before we can create a SageMaker Model that uses it. We'll use the SageMaker Python SDK to handle the upload.
```
# Upload the model archive to the session's default S3 bucket under 'model/'.
model_data = Session().upload_data(path=model_archive_name, key_prefix='model')
print('model uploaded to: {}'.format(model_data))
```
## Create a SageMaker Model and Endpoint
Now that the model archive is in S3, we can create an unoptimized Model and deploy it to an
Endpoint.
```
from sagemaker.tensorflow.serving import Model
instance_type = 'ml.c4.xlarge'
framework = "TENSORFLOW"
framework_version = "1.15.3"
# Deploy the uncompiled SavedModel to a real-time endpoint; deploy() blocks
# until the endpoint is in service and returns a predictor.
sm_model = Model(model_data=model_data, framework_version=framework_version,role=role)
uncompiled_predictor = sm_model.deploy(initial_instance_count=1, instance_type=instance_type)
```
## Make predictions using the endpoint
The endpoint is now up and running, and ready to handle inference requests. The `deploy` call above returned a `predictor` object. The `predict` method of this object handles sending requests to the endpoint. It also automatically handles JSON serialization of our input arguments, and JSON deserialization of the prediction results.
We'll use this sample image:
<img src="https://sagemaker-neo-artifacts.s3.us-east-2.amazonaws.com/cell-4.png" align="left" style="padding: 8px;">
```
sample_img_fname = 'cell-4.png'
sample_img_url = 'https://sagemaker-neo-artifacts.s3.us-east-2.amazonaws.com/{}'.format(sample_img_fname)
!wget {sample_img_url}
# read the image file into a tensor (numpy array)
import cv2
image = cv2.imread(sample_img_fname)
original_shape = image.shape  # kept so the mask can be resized back for display
import matplotlib.pyplot as plt
plt.imshow(image, cmap='gray', interpolation='none')
plt.show()
# Reshape to 256x256 RGB with a leading batch dimension before sending.
image = np.resize(image, (256, 256, 3))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image = np.expand_dims(image, axis=0)
start_time = time.time()
# get a prediction from the endpoint
# the image input is automatically converted to a JSON request.
# the JSON response from the endpoint is returned as a python dict
result = uncompiled_predictor.predict(image)
print("Prediction took %.2f seconds" % (time.time() - start_time))
# show the predicted segmentation image: threshold predictions at `cutoff`
# to get a binary mask, then resize back to the original image size.
cutoff = 0.4
segmentation_img = np.squeeze(np.asarray(result['predictions'])) > cutoff
segmentation_img = segmentation_img.astype(np.uint8)
segmentation_img = np.resize(segmentation_img, (original_shape[0], original_shape[1]))
plt.imshow(segmentation_img, "gray")
plt.show()
```
## Uncompiled Predictor Performance
```
shape_input = np.random.rand(1, 256, 256, 3)
uncompiled_results = []
for _ in range(100):
start = time.time()
uncompiled_predictor.predict(image)
uncompiled_results.append((time.time() - start) * 1000)
print("\nPredictions for un-compiled model: \n")
print('\nP95: ' + str(np.percentile(uncompiled_results, 95)) + ' ms\n')
print('P90: ' + str(np.percentile(uncompiled_results, 90)) + ' ms\n')
print('P50: ' + str(np.percentile(uncompiled_results, 50)) + ' ms\n')
print('Average: ' + str(np.average(uncompiled_results)) + ' ms\n')
```
## Compile model using SageMaker Neo
```
# Replace the value of data_shape below and
# specify the name & shape of the expected inputs for your trained model in JSON
# Note that -1 is replaced with 1 for the batch size placeholder
# NOTE(review): the image tensor fed to the endpoint above is (1, 256, 256, 3);
# this template value of 224x224 looks inconsistent with that — confirm the
# model's actual input shape before running the compilation job.
data_shape = {'inputs':[1, 224, 224, 3]}
# Target instance family for Neo compilation (matches the ml.c4 endpoint used above).
instance_family = 'ml_c4'
compilation_job_name = name_from_base('medical-tf-Neo')
# output path for compiled model artifact
compiled_model_path = 's3://{}/{}/output'.format(bucket, compilation_job_name)
# Kick off the SageMaker Neo compilation job; returns a deployable estimator/model.
optimized_estimator = sm_model.compile(target_instance_family=instance_family,
input_shape=data_shape,
job_name=compilation_job_name,
role=role,
framework=framework.lower(),
framework_version=framework_version,
output_path=compiled_model_path
)
```
## Create Optimized Endpoint
```
optimized_predictor = optimized_estimator.deploy(initial_instance_count = 1, instance_type = instance_type)
start_time = time.time()
# get a prediction from the endpoint
# the image input is automatically converted to a JSON request.
# the JSON response from the endpoint is returned as a python dict
result = optimized_predictor.predict(image)
print("Prediction took %.2f seconds" % (time.time() - start_time))
```
## Compiled Predictor Performance
```
compiled_results = []
# Warm-up inference so one-time graph-loading cost is excluded from the timings.
# (Removed dead `test_input` assignment: it was built but never used.)
optimized_predictor.predict(image)
# Time 100 inferences against the Neo-compiled endpoint, in milliseconds.
for _ in range(100):
    start = time.time()
    optimized_predictor.predict(image)
    compiled_results.append((time.time() - start) * 1000)
print("\nPredictions for compiled model: \n")
print('\nP95: ' + str(np.percentile(compiled_results, 95)) + ' ms\n')
print('P90: ' + str(np.percentile(compiled_results, 90)) + ' ms\n')
print('P50: ' + str(np.percentile(compiled_results, 50)) + ' ms\n')
print('Average: ' + str(np.average(compiled_results)) + ' ms\n')
```
## Performance Comparison
Here we compare inference speed up provided by SageMaker Neo. P90 is 90th percentile latency. We add this because it represents the tail of the latency distribution (worst case). More information on latency percentiles [here](https://blog.bramp.net/post/2018/01/16/measuring-percentile-latency/).
```
p90 = np.percentile(uncompiled_results, 90) / np.percentile(compiled_results, 90)
p50 = np.percentile(uncompiled_results, 50) / np.percentile(compiled_results, 50)
avg = np.average(uncompiled_results) / np.average(compiled_results)
print("P90 Speedup: %.2f" % p90)
print("P50 Speedup: %.2f" % p50)
print("Average Speedup: %.2f" % avg)
```
## Additional Information
## Cleaning up
To avoid incurring charges to your AWS account for the resources used in this tutorial, you need to delete the SageMaker Endpoint.
```
uncompiled_predictor.delete_endpoint()
optimized_predictor.delete_endpoint()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Image/Polynomial.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/Polynomial.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/Polynomial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab.
# Narrowed from a bare `except:` — only an import failure means "not on Colab";
# a bare except would also have swallowed KeyboardInterrupt/SystemExit.
try:
    import google.colab
    import geemap.eefolium as emap
except ImportError:
    import geemap as emap

# Authenticates and initializes Earth Engine
import ee
try:
    ee.Initialize()
except Exception:
    # Any init failure (typically missing credentials) triggers interactive auth.
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
# Applies a non-linear contrast enhancement to a MODIS image using
# function -0.2 + 2.4x - 1.2x^2.
# Load a MODIS image and apply the scaling factor.
img = ee.Image('MODIS/006/MOD09GA/2012_03_09') \
.select(['sur_refl_b01', 'sur_refl_b04', 'sur_refl_b03']) \
.multiply(0.0001)
# Apply the polynomial enhancement.
adj = img.polynomial([-0.2, 2.4, -1.2])
Map.setCenter(-107.24304, 35.78663, 8)
Map.addLayer(img, {'min': 0, 'max': 1}, 'original')
Map.addLayer(adj, {'min': 0, 'max': 1}, 'adjusted')
```
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
from torch.utils.data import DataLoader
import torch
import os
import torchvision.transforms as transforms
from faster_rcnn.utils.datasets.voc.voc import VOCDetection
from faster_rcnn.utils.datasets.adapter import convert_data
from faster_rcnn.utils.display.images import imshow, result_show
from faster_rcnn.utils.datasets.merge import VOCMerge
root = '/data/data'
ds = VOCMerge(root, 'train', dataset_name='tmp')
val_ds = VOCMerge(root, 'val', dataset_name='tmp')
print(len(val_ds))
batch_size = 1
train_data_loader = DataLoader(ds, batch_size=batch_size, shuffle=True, collate_fn=convert_data, num_workers=0, drop_last=True)
val_data_loader = DataLoader(val_ds, batch_size=batch_size, collate_fn=convert_data, num_workers=0, drop_last=True)
categories = ds.classes
print(len(categories))
from faster_rcnn.faster_rcnn import FastRCNN
net = FastRCNN(categories, debug=False)
net.load_state_dict(torch.load('./checkpoints/faster_model.pkl'))
import random, string
def randomword(length):
    """Return a random string of `length` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    chars = []
    for _ in range(length):
        chars.append(random.choice(alphabet))
    return ''.join(chars)
with torch.no_grad():
    for i, data in enumerate(val_data_loader):
        # Random file name shared by the ground-truth and prediction dumps,
        # so the evaluation tool can pair them up.
        file_name = randomword(10)
        # BUG FIX: `print file_name + ".txt"` was Python 2 syntax — a
        # SyntaxError under Python 3. Use the print() function.
        print(file_name + ".txt")
        if not data:
            continue
        batch_tensor, im_info, batch_boxes, batch_boxes_index, _ = data
        # Detect with a low score threshold (0.2) so recall is preserved for mAP.
        pred_boxes, scores, classes, rois, im_data = net.detect_blob(batch_tensor, im_info, 0.2)
        for k in range(batch_tensor.shape[0]):
            # Ground-truth file: "<class> <x1> <y1> <x2> <y2>" per box.
            with open(os.path.join('./evaluate/ground-truth/', file_name + ".txt"), "w") as f:
                for box in batch_boxes:
                    f.write("%s %d %d %d %d\n" % (categories[int(box[4])], box[0], box[1], box[2], box[3]))
            # Prediction file: "<class> <score> <x1> <y1> <x2> <y2>" per box.
            with open(os.path.join('./evaluate/predicted/', file_name + ".txt"), "w") as f:
                for box in zip(classes, scores, pred_boxes):
                    f.write("%s %.2f %d %d %d %d\n" % (box[0], box[1], box[2][0], box[2][1], box[2][2], box[2][3]))
            # imshow(batch_tensor[k], predict_boxes=batch_boxes[ np.where(batch_boxes_index == k )])
            # result_show(im_data[0], pred_boxes, classes, scores)
with torch.no_grad():
# Single-image demo: run detection on one test image with a high (0.7)
# confidence threshold and draw any boxes found.
result = net.detect("./test_im/146.jpg", thr=0.7)
if result:
pred_boxes, scores, classes, rois, im_data = result
if(len(pred_boxes)):
result_show(im_data[0], pred_boxes, classes, scores)
```
| github_jupyter |
# Instructions
Implement a PyTorch dataset for keypoint detection.
Read about custom datasets here:
* https://jdhao.github.io/2017/10/23/pytorch-load-data-and-make-batch/
Image augmentation is an important part of deep learning pipelines. It artificially increases your training sample by generating transformed versions of images.
<img src="static/imgaug.jpg" alt="Drawing" style="width: 600px;"/>
You can read about it here:
* https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
* https://github.com/aleju/imgaug
You should implement the following augmentations:
* randomly flipping left and right
* randomly flipping up and down
* randomly translating by up to 4 pixels
* randomly rotating the image by 180 degrees
* randomly scaling the image from 1.0 to 1.5
Apart from reading images and augmenting, the loader is also cropping the input image by using outputs of the localizer network (bounding box coordinates).
# Your Solution
Your solution function should be called `solution`. In this case it is kept for consistency, but you don't need to do anything with it.
CONFIG is a dictionary with all parameters that you want to pass to your solution function.
```
def solution():
# Entry point required by the grader: return the dataset class to be evaluated.
return DatasetAligner
class DatasetAligner(Dataset):
# Skeleton PyTorch dataset for keypoint detection: reads an image per row of X,
# crops it with the localizer network's bounding box, optionally augments, and
# yields (image tensor, binned-keypoint tensor) pairs. The method bodies are
# exercise stubs left for the student to implement.
def __init__(self, X, y, crop_coordinates, img_dirpath, augmentation, target_size, bins_nr):
super().__init__()
# reset_index(drop=True) so the positional indices used by __getitem__
# line up with row order regardless of the incoming DataFrame's index.
self.X = X.reset_index(drop=True)
self.y = y.reset_index(drop=True)
# bounding boxes predicted by the localizer network, used for cropping
self.crop_coordinates = crop_coordinates
self.img_dirpath = img_dirpath
self.target_size = target_size
# number of bins used to quantize keypoint coordinates
self.bins_nr = bins_nr
# flag: when truthy, __getitem__ should apply the random augmentations
self.augmentation = augmentation
def load_image(self, img_name):
"""
Read image from disk to numpy array
"""
# NOTE(review): stub — img_array is undefined until implemented
return img_array
def __len__(self):
"""
Determine the length of the dataset
"""
# NOTE(review): stub — length is undefined until implemented
return length
def __getitem__(self, index):
"""
This method should take the image filepath at X[index] and targets at y[index] and
preprocess them. Use your aligner_preprocessing function.
Xi_tensor: is a torch.FloatTensor for image
yi_tensors: is a torch.LongTensor for targets it's shape should be 1 x k where k is the number of outputs
"""
# NOTE(review): stub — Xi_tensor/yi_tensors are undefined until implemented
return Xi_tensor, yi_tensors
def aligner_preprocessing(img, target, crop_coordinates, augmentation, *, org_size, target_size, bins_nr):
"""
Run augmentations and transformations on image and target
"""
# Crop to the localizer's predicted box and shift keypoints into crop coordinates.
processed_image, processed_target = crop_image_and_adjust_target(img, target, crop_coordinates)
if augmentation:
"""
Run augmentations on Image (and target if needed)
"""
"""
Transform coordinates to bin numbers as explained below and normalize the image
"""
# NOTE(review): height/width are undefined in this skeleton — they must come
# from the processed image (or target_size) once implemented.
processed_target = bin_quantizer(processed_target, (height, width), bins_nr)
processed_image = normalize_img(processed_image)
return processed_image, processed_target
def crop_image_and_adjust_target(img, target, crop_coordinates):
"""
crop image by using localization network predictions.
Remember to adjust the keypoint positions to the cropped image
"""
# NOTE(review): stub — cropped_image/adjusted_target are undefined until implemented
return cropped_image, adjusted_target
def bin_quantizer(coordinates, shape, bins_nr):
"""
Quantize the height and width and transform coordinates to bin numbers
"""
# NOTE(review): stub — binned_coordinates is undefined until implemented
return binned_coordinates
def normalize_img(img):
# Per-channel normalization statistics (presumably dataset-wide mean/std in
# BGR or RGB order — confirm against the image loader).
mean = [0.28201905, 0.37246801, 0.42341868]
std = [0.13609867, 0.12380088, 0.13325344]
"""
Normalize Image
"""
# NOTE(review): stub — normalized_img is undefined until implemented
return normalized_img
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import add_changepoints_to_plot
import multiprocessing as mp
from datetime import date,timedelta
import time as t
import matplotlib.pyplot as plt
import defs
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
%matplotlib inline
plt.rcParams['figure.figsize']=(20,10)
plt.style.use('ggplot')
market_dfset = {}
dfset = {}
modelset = {}
futureset = {}
forecastset = {}
figureset = {}
legendset = {}
tickers = []
manager = mp.Manager()
tickers = manager.list()
# Read ticker symbols (one per line) from the "list" file into the
# manager-backed shared list. `with` guarantees the file is closed even on
# error, and iterating the file object replaces the manual readline() loop.
with open("list", "r") as f:
    for line in f:
        tickers.append(line.strip())
market_dfset=manager.dict()
modelset=manager.dict()
forecastset=manager.dict()
p = {}
for ticker in tickers:
p[ticker]= mp.Process(target=defs.run_prophet,args=(tickers,ticker,market_dfset,modelset,forecastset))
l = len(tickers)
c = mp.cpu_count()
for i in range(0, l, c):
for j in range(0,c):
if (i+j<l):
p[tickers[i+j]].start()
for j in range(0,c):
if (i+j<l): p[tickers[i+j]].join()
for ticker in tickers:
figureset[ticker] = market_dfset[ticker]["Close"].plot()
legendset[ticker]=figureset[ticker].legend() #get the legend
legendset[ticker].get_texts()[0].set_text(ticker) #change the legend text
plt.show()
for ticker in tickers:
print(ticker)
print(forecastset[ticker][['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(n=30))
figure=modelset[ticker].plot(forecastset[ticker])
fig = modelset[ticker].plot(forecastset[ticker])
a = add_changepoints_to_plot(fig.gca(), modelset[ticker], forecastset[ticker])
figure2=modelset[ticker].plot_components(forecastset[ticker])
plt.show()
print('\n')
```
# Plotting the forecast
```
two_yearset = {}
```
With the data that we have, it is hard to see how good/bad the forecast (blue line) is compared to the actual data (black dots). Let's take a look at the last 800 data points (~2 years) of forecast vs actual without looking at the future forecast (because we are just interested in getting a visual of the error between actual vs forecast).
```
for ticker in tickers:
two_yearset[ticker] = forecastset[ticker].set_index('ds').join(market_dfset[ticker])
two_yearset[ticker] = two_yearset[ticker][['Close', 'yhat', 'yhat_upper', 'yhat_lower' ]].dropna().tail(800)
two_yearset[ticker]['yhat']=np.exp(two_yearset[ticker].yhat)
two_yearset[ticker]['yhat_upper']=np.exp(two_yearset[ticker].yhat_upper)
two_yearset[ticker]['yhat_lower']=np.exp(two_yearset[ticker].yhat_lower)
two_yearset[ticker].tail()
figureset[ticker]=two_yearset[ticker][['Close', 'yhat']].plot()
figureset[ticker].plot(two_yearset[ticker].yhat_upper, color='black', linestyle=':', alpha=0.5)
figureset[ticker].plot(two_yearset[ticker].yhat_lower, color='black', linestyle=':', alpha=0.5)
figureset[ticker].set_title('Actual (Orange) vs Forecasted Upper & Lower Confidence (Black)')
figureset[ticker].set_ylabel('Price')
figureset[ticker].set_xlabel('Date')
legendset[ticker]=figureset[ticker].legend() #get the legend
legendset[ticker].get_texts()[0].set_text(ticker) #change the legend text
plt.show()
two_years_AE_set = {}
for ticker in tickers:
two_years_AE_set[ticker] = (two_yearset[ticker].yhat - two_yearset[ticker].Close)
print(ticker)
print(two_years_AE_set[ticker].describe())
print("R2 score: ",r2_score(two_yearset[ticker].Close, two_yearset[ticker].yhat))
print("MSE score: ",mean_squared_error(two_yearset[ticker].Close, two_yearset[ticker].yhat))
print("MAE score: ",mean_absolute_error(two_yearset[ticker].Close, two_yearset[ticker].yhat))
print('\n')
full_dfset = {}
for ticker in tickers:
full_dfset[ticker] = forecastset[ticker].set_index('ds').join(market_dfset[ticker])
full_dfset[ticker]['yhat']=np.exp(full_dfset[ticker]['yhat'])
for ticker in tickers:
    # One-year plot: actual close vs forecast with de-logged confidence band.
    fig, ax1 = plt.subplots()
    n = 365
    ax1.plot(full_dfset[ticker].tail(n).Close)
    ax1.plot(full_dfset[ticker].tail(n).yhat, color='black', linestyle=':')
    ax1.fill_between(full_dfset[ticker].tail(n).index,
                     np.exp(full_dfset[ticker]['yhat_upper'].tail(n)),
                     np.exp(full_dfset[ticker]['yhat_lower'].tail(n)),
                     alpha=0.5, color='darkgray')
    ax1.set_title(ticker)
    ax1.set_ylabel('Price')
    ax1.set_xlabel('Date')
    plt.show()

    # 30-day window starting (365+15) days back from the last valid row.
    # Hoist the repeated last_valid_index()/DateOffset slice into one variable —
    # the original recomputed the identical slice six times.
    n = 365 + 15
    d = 30
    last = full_dfset[ticker].last_valid_index()
    window = full_dfset[ticker][last - pd.DateOffset(n, 'D'):last - pd.DateOffset(n - d, 'D')]
    fig2, ax2 = plt.subplots()
    ax2.plot(window.Close)
    ax2.plot(window.yhat, color='black', linestyle=':')
    ax2.fill_between(window.index,
                     np.exp(window['yhat_upper']),
                     np.exp(window['yhat_lower']),
                     alpha=0.5, color='darkgray')
    ax2.set_title(ticker)
    ax2.set_ylabel('Price')
    ax2.set_xlabel('Date')

    # Export the last year's forecast plus de-logged confidence bounds to CSV.
    pd.merge(full_dfset[ticker]['yhat'].tail(n=365),
             pd.merge(np.exp(full_dfset[ticker]['yhat_upper'].tail(n=365)),
                      np.exp(full_dfset[ticker]['yhat_lower'].tail(n=365)),
                      on='ds'),
             on='ds').to_csv(ticker + '-365Days.csv')
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Keras overview
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/overview.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This guide gives you the basics to get started with Keras. It's a 10-minute read.
## Import tf.keras
`tf.keras` is TensorFlow's implementation of the
[Keras API specification](https://keras.io). This is a high-level
API to build and train models that includes first-class support for
TensorFlow-specific functionality, such as [eager execution](../eager.ipynb),
`tf.data` pipelines, and [Estimators](../estimator.ipynb).
`tf.keras` makes TensorFlow easier to use without sacrificing flexibility and
performance.
To get started, import `tf.keras` as part of your TensorFlow program setup:
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow import keras
```
`tf.keras` can run any Keras-compatible code, but keep in mind:
* The `tf.keras` version in the latest TensorFlow release might not be the same
as the latest `keras` version from PyPI. Check `tf.keras.__version__`.
* When [saving a model's weights](./save_and_serialize.ipynb), `tf.keras` defaults to the
[checkpoint format](../checkpoint.ipynb). Pass `save_format='h5'` to
use HDF5 (or pass a filename that ends in `.h5`).
## Build a simple model
### Sequential model
In Keras, you assemble *layers* to build *models*. A model is (usually) a graph
of layers. The most common type of model is a stack of layers: the
`tf.keras.Sequential` model.
To build a simple, fully-connected network (i.e. multi-layer perceptron):
```
from tensorflow.keras import layers
model = tf.keras.Sequential()
# Adds a densely-connected layer with 64 units to the model:
model.add(layers.Dense(64, activation='relu'))
# Add another:
model.add(layers.Dense(64, activation='relu'))
# Add a softmax layer with 10 output units:
model.add(layers.Dense(10, activation='softmax'))
```
You can find a complete, short example of how to use Sequential models [here](https://www.tensorflow.org/tutorials/quickstart/beginner).
To learn about building more advanced models than Sequential models, see:
- [Guide to the Keras Functional API](./functional.ipynb)
- [Guide to writing layers and models from scratch with subclassing](./custom_layers_and_models.ipynb)
### Configure the layers
There are many `tf.keras.layers` available. Most of them share some common constructor
arguments:
* `activation`: Set the activation function for the layer. This parameter is
specified by the name of a built-in function or as a callable object. By
default, no activation is applied.
* `kernel_initializer` and `bias_initializer`: The initialization schemes
that create the layer's weights (kernel and bias). This parameter is a name or
a callable object. This defaults to the `"Glorot uniform"` initializer.
* `kernel_regularizer` and `bias_regularizer`: The regularization schemes
that apply the layer's weights (kernel and bias), such as L1 or L2
regularization. By default, no regularization is applied.
The following instantiates `tf.keras.layers.Dense` layers using constructor
arguments:
```
# Create a sigmoid layer:
layers.Dense(64, activation='sigmoid')
# Or:
layers.Dense(64, activation=tf.keras.activations.sigmoid)
# A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix:
layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))
# A linear layer with L2 regularization of factor 0.01 applied to the bias vector:
layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01))
# A linear layer with a kernel initialized to a random orthogonal matrix:
layers.Dense(64, kernel_initializer='orthogonal')
# A linear layer with a bias vector initialized to 2.0s:
layers.Dense(64, bias_initializer=tf.keras.initializers.Constant(2.0))
```
## Train and evaluate
### Set up training
After the model is constructed, configure its learning process by calling the
`compile` method:
```
model = tf.keras.Sequential([
# Adds a densely-connected layer with 64 units to the model:
layers.Dense(64, activation='relu', input_shape=(32,)),
# Add another:
layers.Dense(64, activation='relu'),
# Add a softmax layer with 10 output units:
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
```
`tf.keras.Model.compile` takes three important arguments:
* `optimizer`: This object specifies the training procedure. Pass it optimizer
instances from the `tf.keras.optimizers` module, such as
`tf.keras.optimizers.Adam` or
`tf.keras.optimizers.SGD`. If you just want to use the default parameters, you can also specify optimizers via strings, such as `'adam'` or `'sgd'`.
* `loss`: The function to minimize during optimization. Common choices include
mean square error (`mse`), `categorical_crossentropy`, and
`binary_crossentropy`. Loss functions are specified by name or by
passing a callable object from the `tf.keras.losses` module.
* `metrics`: Used to monitor training. These are string names or callables from
the `tf.keras.metrics` module.
* Additionally, to make sure the model trains and evaluates eagerly, you can make sure to pass `run_eagerly=True` as a parameter to compile.
The following shows a few examples of configuring a model for training:
```
# Configure a model for mean-squared error regression.
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse', # mean squared error
metrics=['mae']) # mean absolute error
# Configure a model for categorical classification.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
### Train from NumPy data
For small datasets, use in-memory [NumPy](https://www.numpy.org/)
arrays to train and evaluate a model. The model is "fit" to the training data
using the `fit` method:
```
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, epochs=10, batch_size=32)
```
`tf.keras.Model.fit` takes three important arguments:
* `epochs`: Training is structured into *epochs*. An epoch is one iteration over
the entire input data (this is done in smaller batches).
* `batch_size`: When passed NumPy data, the model slices the data into smaller
batches and iterates over these batches during training. This integer
specifies the size of each batch. Be aware that the last batch may be smaller
if the total number of samples is not divisible by the batch size.
* `validation_data`: When prototyping a model, you want to easily monitor its
performance on some validation data. Passing this argument—a tuple of inputs
and labels—allows the model to display the loss and metrics in inference mode
for the passed data, at the end of each epoch.
Here's an example using `validation_data`:
```
import numpy as np
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
val_data = np.random.random((100, 32))
val_labels = np.random.random((100, 10))
model.fit(data, labels, epochs=10, batch_size=32,
validation_data=(val_data, val_labels))
```
### Train from tf.data datasets
Use the [Datasets API](../data.ipynb) to scale to large datasets
or multi-device training. Pass a `tf.data.Dataset` instance to the `fit`
method:
```
# Instantiates a toy dataset instance:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=10)
```
Since the `Dataset` yields batches of data, this snippet does not require a `batch_size`.
Datasets can also be used for validation:
```
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
val_dataset = val_dataset.batch(32)
model.fit(dataset, epochs=10,
validation_data=val_dataset)
```
### Evaluate and predict
The `tf.keras.Model.evaluate` and `tf.keras.Model.predict` methods can use NumPy
data and a `tf.data.Dataset`.
Here's how to *evaluate* the inference-mode loss and metrics for the data provided:
```
# With Numpy arrays
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.evaluate(data, labels, batch_size=32)
# With a Dataset
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.evaluate(dataset)
```
And here's how to *predict* the output of the last layer in inference for the data provided,
as a NumPy array:
```
result = model.predict(data, batch_size=32)
print(result.shape)
```
For a complete guide on training and evaluation, including how to write custom training loops from scratch, see the [guide to training and evaluation](./train_and_evaluate.ipynb).
## Build complex models
### The Functional API
The `tf.keras.Sequential` model is a simple stack of layers that cannot
represent arbitrary models. Use the
[Keras functional API](./functional.ipynb)
to build complex model topologies such as:
* Multi-input models,
* Multi-output models,
* Models with shared layers (the same layer called several times),
* Models with non-sequential data flows (e.g. residual connections).
Building a model with the functional API works like this:
1. A layer instance is callable and returns a tensor.
2. Input tensors and output tensors are used to define a `tf.keras.Model`
instance.
3. This model is trained just like the `Sequential` model.
The following example uses the functional API to build a simple, fully-connected
network:
```
inputs = tf.keras.Input(shape=(32,)) # Returns an input placeholder
# A layer instance is callable on a tensor, and returns a tensor.
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
predictions = layers.Dense(10, activation='softmax')(x)
```
Instantiate the model given inputs and outputs.
```
model = tf.keras.Model(inputs=inputs, outputs=predictions)
# The compile step specifies the training configuration.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs
model.fit(data, labels, batch_size=32, epochs=5)
```
### Model subclassing
Build a fully-customizable model by subclassing `tf.keras.Model` and defining
your own forward pass. Create layers in the `__init__` method and set them as
attributes of the class instance. Define the forward pass in the `call` method.
Model subclassing is particularly useful when
[eager execution](../eager.ipynb) is enabled, because it allows the forward pass
to be written imperatively.
Note: if you need your model to *always* run imperatively, you can set `dynamic=True` when calling the `super` constructor.
> Key Point: Use the right API for the job. While model subclassing offers
flexibility, it comes at a cost of greater complexity and more opportunities for
user errors. If possible, prefer the functional API.
The following example shows a subclassed `tf.keras.Model` using a custom forward
pass that does not have to be run imperatively:
```
class MyModel(tf.keras.Model):
def __init__(self, num_classes=10):
super(MyModel, self).__init__(name='my_model')
self.num_classes = num_classes
# Define your layers here.
self.dense_1 = layers.Dense(32, activation='relu')
self.dense_2 = layers.Dense(num_classes, activation='sigmoid')
def call(self, inputs):
# Define your forward pass here,
# using layers you previously defined (in `__init__`).
x = self.dense_1(inputs)
return self.dense_2(x)
```
Instantiate the new model class:
```
model = MyModel(num_classes=10)
# The compile step specifies the training configuration.
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs.
model.fit(data, labels, batch_size=32, epochs=5)
```
### Custom layers
Create a custom layer by subclassing `tf.keras.layers.Layer` and implementing
the following methods:
* `__init__`: Optionally define sublayers to be used by this layer.
* `build`: Create the weights of the layer. Add weights with the `add_weight`
method.
* `call`: Define the forward pass.
* Optionally, a layer can be serialized by implementing the `get_config` method
and the `from_config` class method.
Here's an example of a custom layer that implements a `matmul` of an input with
a kernel matrix:
```
class MyLayer(layers.Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.kernel)
def get_config(self):
base_config = super(MyLayer, self).get_config()
base_config['output_dim'] = self.output_dim
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
```
Create a model using your custom layer:
```
model = tf.keras.Sequential([
MyLayer(10),
layers.Activation('softmax')])
# The compile step specifies the training configuration
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Trains for 5 epochs.
model.fit(data, labels, batch_size=32, epochs=5)
```
Learn more about creating new layers and models from scratch with subclassing in the [Guide to writing layers and models from scratch](./custom_layers_and_models.ipynb).
## Callbacks
A callback is an object passed to a model to customize and extend its behavior
during training. You can write your own custom callback, or use the built-in
`tf.keras.callbacks` that include:
* `tf.keras.callbacks.ModelCheckpoint`: Save checkpoints of your model at
regular intervals.
* `tf.keras.callbacks.LearningRateScheduler`: Dynamically change the learning
rate.
* `tf.keras.callbacks.EarlyStopping`: Interrupt training when validation
performance has stopped improving.
* `tf.keras.callbacks.TensorBoard`: Monitor the model's behavior using
[TensorBoard](https://tensorflow.org/tensorboard).
To use a `tf.keras.callbacks.Callback`, pass it to the model's `fit` method:
```
callbacks = [
# Interrupt training if `val_loss` stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
# Write TensorBoard logs to `./logs` directory
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(data, labels, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(val_data, val_labels))
```
<a name='save_and_restore'></a>
## Save and restore
<a name="weights_only"></a>
### Save just the weights values
Save and load the weights of a model using `tf.keras.Model.save_weights`:
```
model = tf.keras.Sequential([
layers.Dense(64, activation='relu', input_shape=(32,)),
layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Save weights to a TensorFlow Checkpoint file
model.save_weights('./weights/my_model')
# Restore the model's state,
# this requires a model with the same architecture.
model.load_weights('./weights/my_model')
```
By default, this saves the model's weights in the
[TensorFlow checkpoint](../checkpoint.ipynb) file format. Weights can
also be saved to the Keras HDF5 format (the default for the multi-backend
implementation of Keras):
```
# Save weights to a HDF5 file
model.save_weights('my_model.h5', save_format='h5')
# Restore the model's state
model.load_weights('my_model.h5')
```
### Save just the model configuration
A model's configuration can be saved—this serializes the model architecture
without any weights. A saved configuration can recreate and initialize the same
model, even without the code that defined the original model. Keras supports
JSON and YAML serialization formats:
```
# Serialize a model to JSON format
json_string = model.to_json()
json_string
import json
import pprint
pprint.pprint(json.loads(json_string))
```
Recreate the model (newly initialized) from the JSON:
```
fresh_model = tf.keras.models.model_from_json(json_string)
```
Serializing a model to YAML format requires that you install `pyyaml` *before you import TensorFlow*:
```
yaml_string = model.to_yaml()
print(yaml_string)
```
Recreate the model from the YAML:
```
fresh_model = tf.keras.models.model_from_yaml(yaml_string)
```
Caution: Subclassed models are not serializable because their architecture is
defined by the Python code in the body of the `call` method.
### Save the entire model in one file
The entire model can be saved to a file that contains the weight values, the
model's configuration, and even the optimizer's configuration. This allows you
to checkpoint a model and resume training later—from the exact same
state—without access to the original code.
```
# Create a simple model
model = tf.keras.Sequential([
layers.Dense(10, activation='softmax', input_shape=(32,)),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, batch_size=32, epochs=5)
# Save entire model to a HDF5 file
model.save('my_model.h5')
# Recreate the exact same model, including weights and optimizer.
model = tf.keras.models.load_model('my_model.h5')
```
Learn more about saving and serialization for Keras models in the guide to [save and serialize models](./save_and_serialize.ipynb).
<a name="eager_execution"></a>
## Eager execution
[Eager execution](../eager.ipynb) is an imperative programming
environment that evaluates operations immediately. This is not required for
Keras, but is supported by `tf.keras` and useful for inspecting your program and
debugging.
All of the `tf.keras` model-building APIs are compatible with eager execution.
And while the `Sequential` and functional APIs can be used, eager execution
especially benefits *model subclassing* and building *custom layers*—the APIs
that require you to write the forward pass as code (instead of the APIs that
create models by assembling existing layers).
See the [eager execution guide](../eager.ipynb) for
examples of using Keras models with custom training loops and `tf.GradientTape`.
You can also find a complete, short example [here](https://www.tensorflow.org/tutorials/quickstart/advanced).
## Distribution
### Multiple GPUs
`tf.keras` models can run on multiple GPUs using
`tf.distribute.Strategy`. This API provides distributed
training on multiple GPUs with almost no changes to existing code.
Currently, `tf.distribute.MirroredStrategy` is the only supported
distribution strategy. `MirroredStrategy` does in-graph replication with
synchronous training using all-reduce on a single machine. To use
`tf.distribute.Strategy`, nest the optimizer instantiation, model construction, and compilation in the `Strategy`'s `.scope()`, then
train the model.
The following example distributes a `tf.keras.Model` across multiple GPUs on a
single machine.
First, define a model inside the distributed strategy scope:
```
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10,)))
model.add(layers.Dense(1, activation='sigmoid'))
optimizer = tf.keras.optimizers.SGD(0.2)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
```
Next, train the model on data as usual:
```
x = np.random.random((1024, 10))
y = np.random.randint(2, size=(1024, 1))
x = tf.cast(x, tf.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.shuffle(buffer_size=1024).batch(32)
model.fit(dataset, epochs=1)
```
For more information, see the [full guide on Distributed Training in TensorFlow](../distributed_training.ipynb).
| github_jupyter |
<table width="100%"> <tr>
<td style="background-color:#ffffff;">
<a href="https://qsoftware.lu.lv/index.php/qworld/" target="_blank"><img src="../images/qworld.jpg" width="35%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by Abuzer Yakaryilmaz (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2> <font color="blue"> Solutions for </font>Coin Flip: A Probabilistic Bit</h2>
<a id="task1"></a>
<h3> Task 1: Simulating FairCoin in Python</h3>
Flip a fair coin 100 times. Calculate the total number of heads and tails, and then check the ratio of the number of heads and the number of tails.
Do the same experiment 1000 times.
Do the same experiment 10,000 times.
Do the same experiment 100,000 times.
Do your results get close to the ideal case (the numbers of heads and tails are equal)?
<h3>Solution</h3>
```
from random import randrange

# Flip a fair coin `trials` times for several experiment sizes and report
# how close the heads/tails ratio gets to the ideal value of 1.
for trials in [100, 1000, 10000, 100000]:
    heads = 0
    tails = 0
    for _ in range(trials):
        # randrange(2) returns 0 or 1 with equal probability
        if randrange(2) == 0:
            heads += 1
        else:
            tails += 1
    print("experiment:", trials)
    print("heads =", heads, " tails = ", tails)
    print("the ratio of #heads/#tails is", round(heads / tails, 4))
    print()  # empty line
```
<a id="task2"></a>
<h3> Task 2: Simulating BiasedCoin in Python</h3>
Flip the following biased coin 100 times. Calculate the total numbers of heads and tails, and then check the ratio of the number of heads and the number of tails.
$
BiasedCoin = \begin{array}{c|cc} & \mathbf{Head} & \mathbf{Tail} \\ \hline \mathbf{Head} & 0.6 & 0.6 \\ \mathbf{Tail} & 0.4 & 0.4 \end{array}
$
Do the same experiment 1000 times.
Do the same experiment 10,000 times.
Do the same experiment 100,000 times.
Do your results get close to the ideal case $ \mypar{ \dfrac{ \mbox{# of heads} }{ \mbox{# of tails} } = \dfrac{0.6}{0.4} = 1.50000000 } $?
<h3>Solution</h3>
```
from random import randrange

# A draw from {0,...,99} is below 60 with probability 0.6 (counted as heads)
# and 60 or above with probability 0.4 (counted as tails).
for trials in [100, 1000, 10000, 100000]:
    heads = 0
    tails = 0
    for _ in range(trials):
        if randrange(100) < 60:
            heads += 1  # with probability 0.6
        else:
            tails += 1  # with probability 0.4
    print("experiment:", trials)
    print("heads =", heads, " tails = ", tails)
    print("the ratio of #heads/#tails is", round(heads / tails, 4))
    print()  # empty line
```
<a id="task3"></a>
<h3> Task 3</h3>
Write a function to implement the described biased coin,
The inputs are integers $ N >0 $ and $ 0 \leq B < N $.
The output is either "Heads" or "Tails".
<h3>Solution</h3>
```
def biased_coin(N, B):
    """Flip a biased coin: returns "Heads" with probability B/N, else "Tails".

    A uniform draw from {0, ..., N-1} lands below B in exactly B of the N
    equally likely outcomes, which realizes the desired bias.
    """
    from random import randrange
    return "Heads" if randrange(N) < B else "Tails"
```
<a id="task4"></a>
<h3> Task 4</h3>
We use the biased coin described in Task 3.
(You may use the function given in the solution.)
We pick $ N $ as 101.
Our task is to determine the value of $ B $ experimentally without checking its value directly.
Flip the (same) biased coin 500 times, collect the statistics, and then guess the bias.
Compare your guess with the actual bias by calculating the error (the absolute value of the difference).
<h3>Solution</h3>
```
def biased_coin(N, B):
    """Return "Heads" with probability B/N, otherwise "Tails"."""
    from random import randrange
    return "Heads" if randrange(N) < B else "Tails"

from random import randrange

N = 101
B = randrange(100)  # hidden bias we try to estimate empirically
total_tosses = 500
# Count heads over the tosses; the empirical frequency estimates B/N.
the_number_of_heads = 0
for _ in range(total_tosses):
    if biased_coin(N, B) == "Heads":
        the_number_of_heads += 1
my_guess = the_number_of_heads / total_tosses
real_bias = B / N
# Relative error of the estimate, expressed as a percentage.
error = abs(my_guess - real_bias) / real_bias * 100
print("my guess is", my_guess)
print("real bias is", real_bias)
print("error (%) is", error)
```
| github_jupyter |
# Introduction to Deep Learning with PyTorch
In this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.
## Neural Networks
Deep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.
<img src="assets/simple_neuron.png" width=400px>
Mathematically this looks like:
$$
\begin{align}
y &= f(w_1 x_1 + w_2 x_2 + b) \\
y &= f\left(\sum_i w_i x_i +b \right)
\end{align}
$$
With vectors this is the dot/inner product of two vectors:
$$
h = \begin{bmatrix}
x_1 \, x_2 \cdots x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_1 \\
w_2 \\
\vdots \\
w_n
\end{bmatrix}
$$
## Tensors
It turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.
<img src="assets/tensor_examples.svg" width=600px>
With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.
```
# First, import PyTorch
import torch
def activation(x):
    """Logistic sigmoid, 1 / (1 + e^(-x)), applied elementwise.

    Arguments
    ---------
    x: torch.Tensor
    """
    neg_exp = torch.exp(-x)
    return 1 / (neg_exp + 1)
### Generate some data
torch.manual_seed(7)  # Set the random seed so things are predictable
# Features are 5 random normal variables, shape (1, 5)
features = torch.randn((1, 5))
# True weights for our data, random normal variables again (same shape as features)
weights = torch.randn_like(features)
# and a true bias term, shape (1, 1)
bias = torch.randn((1, 1))
```
Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:
`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one.
`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.
Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.
PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network.
> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
```
## Calculate the output of this network using the weights and bias tensors
def metwork_op(features, weights, bias):
    """Single-layer forward pass: sigmoid(sum(features @ weights) + bias).

    Fix: the original version accepted `bias` but never used it, which
    contradicts the exercise ("using the weights and bias tensors").
    The bias is now added before the activation.

    Note: with a (1, 1) bias the result is a (1, 1) tensor rather than a
    0-d scalar; downstream use here only prints the value.
    """
    return activation(torch.sum(torch.mm(features, weights)) + bias)
```
You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.
Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error
```python
>> torch.mm(features, weights)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-13-15d592eb5279> in <module>()
----> 1 torch.mm(features, weights)
RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
```
As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal the number of rows in the second tensor. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.
**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.
There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).
* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.
* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.
* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.
I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.
> **Exercise**: Calculate the output of our little network using matrix multiplication.
```
## Calculate the output of this network using matrix multiplication
print(features.shape)
# Fix: the exercise (and the preceding text) asks to reshape the *weights*
# to (5, 1), not the features; (1, 5) @ (5, 1) -> (1, 1) as intended.
# The original reshaped `features`, producing a (5, 5) product instead.
w_col = weights.view(5, 1)
print(w_col)
print(weights.shape)
print(metwork_op(features, w_col, bias))
```
### Stack them up!
That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.
<img src='assets/multilayer_diagram_weights.png' width=450px>
The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated
$$
\vec{h} = [h_1 \, h_2] =
\begin{bmatrix}
x_1 \, x_2 \cdots \, x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_{11} & w_{12} \\
w_{21} &w_{22} \\
\vdots &\vdots \\
w_{n1} &w_{n2}
\end{bmatrix}
$$
The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply
$$
y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)
$$
```
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
```
> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
```
## Your solution here
print(features.shape)
print(W1.shape)
print(W2.shape)
print(B1.shape)
# Forward pass: hidden layer, then output layer, each linear + sigmoid.
hidden = activation(torch.mm(features, W1) + B1)
output = activation(torch.mm(hidden, W2) + B2)
print(hidden)
print(output)
```
If you did this correctly, you should see the output `tensor([[ 0.3171]])`.
The number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.
## Numpy to Torch and back
Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
```
import numpy as np
a = np.random.rand(4,3)
a
b = torch.from_numpy(a)
b
b.numpy()
```
The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
```
# Multiply PyTorch Tensor by 2, in place (trailing underscore = in-place op)
b.mul_(2)
# Numpy array matches new values from Tensor, because memory is shared
a
```
| github_jupyter |
```
from datetime import datetime, timezone
import pandas as pd
import langdetect
import json
# ISO 639-1 code -> language-name lookup table (columns include 'code', 'name').
lang_code = pd.read_json('ISO-639-1-language.json')
def get_language(text=None):
    """Detect the language(s) of `text` with langdetect.

    Returns a list of {"name": ..., "code": ...} dicts (empty when `text`
    is falsy). Names are resolved through the module-level `lang_code`
    table; codes are upper-cased ISO 639-1 codes.
    """
    if not text:
        return []
    by_code = lang_code.set_index('code')
    detected = []
    for candidate in langdetect.detect_langs(text):
        detected.append({
            "name": by_code.loc[candidate.lang, 'name'],
            "code": candidate.lang.upper(),
        })
    return detected
def get_software(data):
    """Build the template's software list from a repository record.

    Each entry in data['languages'] becomes {"name", "version", "library"}.
    Only R and Python carry library lists (taken from data['r_libs'] /
    data['py_libs']); other languages get an empty list.
    """
    software = []
    for lang in data.get('languages', []):
        name = lang.get('node', {}).get('name', '')
        if name == 'R':
            libs = data.get('r_libs', [])
        elif name == 'Python':
            libs = data.get('py_libs', [])
        else:
            libs = []
        software.append(dict(name=name, version="", library=libs))
    return software
def has_content(node):
    """Recursively prune falsy leaves from nested dicts/lists.

    Integers are always kept — this preserves fields like ``published = 0``
    — while every other falsy value ("" / None / empty container) is
    dropped. Scalars pass through unchanged.
    """
    if isinstance(node, dict):
        pruned = {}
        for key, value in node.items():
            cleaned = has_content(value)
            # keep ints even when 0 (e.g. the `published` flag)
            if isinstance(cleaned, int) or cleaned:
                pruned[key] = cleaned
        return pruned
    if isinstance(node, list):
        pruned = []
        for element in node:
            cleaned = has_content(element)
            # same int exception as for dict values
            if isinstance(cleaned, int) or cleaned:
                pruned.append(cleaned)
        return pruned
    # str, int, float (no other container types, e.g. tuples, are expected)
    return node
import pymongo
# Local MongoDB instance (non-default port) holding the GitHub/BigQuery dump.
mongo = pymongo.MongoClient(port=27018)
db = mongo['nlp']
collection = db['github-bq']
# Full-text index over README bodies, required for the $text queries below.
collection.create_index([('readme', pymongo.TEXT)], name='readme_text_idx')
# collection.create_index(('description', pymongo.TEXT), name='description_text_idx')
```
# Fill the script template
```
def build_template_for_github(data, overwrite='no', published=1, repositoryid='central', cleanup=False):
    """Map a GitHub repository record onto a NADA script-project template.

    Parameters
    ----------
    data : dict
        Repository document from the `github-bq` collection. Must contain
        `_id` (``owner/repo``), `owner`, `repo_created_at` and
        `repo_updated_at`; `description`, `name`, `readme`, `homepage_url`,
        `license_info`, `languages`, `r_libs`, `py_libs` are optional.
    overwrite : str
        'yes'/'no' flag passed through to the template.
    published : int
        Publication flag (0/1) passed through to the template.
    repositoryid : str
        Target NADA repository identifier.
    cleanup : bool
        When True, strip empty placeholder fields via has_content().

    Returns
    -------
    dict
        The (optionally cleaned) template, ready to POST to the NADA API.
    """
    # '/' is not a valid idno character, so 'owner/repo' -> 'owner_repo'.
    title_idno = data['_id'].replace('/', '_')
    tp_template = dict(
        repositoryid=repositoryid,
        published=published,
        overwrite=overwrite,
        # Metadata about this metadata document itself.
        doc_desc=dict(
            title="",
            idno="",
            producers=[
                dict(
                    name="GitHub Bot",
                    abbr="",
                    affiliation="",
                    role="bot"
                )
            ],
            prod_date=datetime.now().strftime('%d %B %Y'),
            version=""
        ),
        # Description of the project itself; empty strings are placeholders
        # that has_content() removes when cleanup=True.
        project_desc=dict(
            title_statement=dict(
                idno=title_idno,
                title=data.get('description', data.get('name', '')),
                sub_title="",
                alternate_title="",
                translated_title=""
            ),
            production_date=[
                # pd.to_datetime(data['repo_created_at']).strftime('%d %B %Y')
                pd.to_datetime(data['repo_created_at']).strftime('%B %Y')
            ],
            geographic_units=[
                dict(
                    name="",
                    code="",
                    type=""
                )
            ],
            authoring_entity=[
                dict(
                    name=data['owner'],
                    role="owner",
                    affiliation=data.get('homepage_url', ''),
                    abbreviation="",
                    email=""
                )
            ],
            contributors=[
                dict(
                    name="",
                    role="",
                    affiliation="",
                    abbreviation="",
                    email="",
                    url=""
                )
            ],
            curators=[
                dict(
                    name="",
                    role="",
                    affiliation="",
                    abbreviation="",
                    email="",
                    url=""
                )
            ],
            # Prefer the README as the abstract; fall back to description/name.
            abstract=data.get('readme', data.get('description', data.get('name', ''))),
            keywords=[
                dict(
                    name="",
                    vocabulary="",
                    uri=""
                )
            ],
            themes=[
                dict(
                    name="",
                    vocabulary="",
                    uri=""
                )
            ],
            topics=[
                dict(
                    id="",
                    name="",
                    parent_id="",
                    vocabulary="",
                    uri=""
                )
            ],
            disciplines=[
                dict(
                    name="",
                    vocabulary="",
                    uri=""
                )
            ],
            output_types=[
                dict(
                    type="",
                    description="",
                    uri="",
                    doi=""
                )
            ],
            repository_uri=[
                dict(
                    name=data['_id'],
                    type="Github",
                    uri=f"https://github.com/{data['_id']}"
                )
            ],
            project_website=[
                data.get('homepage_url', '')
            ],
            version_statement=dict(
                version="latest",
                version_date=pd.to_datetime(data['repo_updated_at']).strftime('%d %B %Y'),
                version_resp="",
                version_notes="Latest update"
            ),
            # Language detected from the README (seed text falls back to 'english').
            language=get_language(data.get('readme', data.get('description', data.get('name', 'english')))),
            methods=[
                dict(
                    name="",
                    note=""
                )
            ],
            software=get_software(data),
            technology_environment="",
            technology_requirements="",
            reproduction_instructions="",
            license=[
                dict(
                    name=data.get('license_info', ''),
                    uri=""
                )
            ],
            review_process=[
                dict(
                    submission_date="",
                    reviewer="",
                    review_status="",
                    approval_authority="",
                    approval_date=""
                )
            ],
            disclaimer="",
            confidentiality="",
            citation_requirement="",
            datasets=[
                dict(
                    name="",
                    idno="",
                    note="",
                    access_type="",
                    uri=""
                )
            ],
            sponsors=[
                dict(
                    name="",
                    abbr="",
                    role="",
                    grant_no=""
                )
            ],
            acknowledgements=[
                dict(
                    name="",
                    affiliation="",
                    role=""
                )
            ],
            related_projects=[
                dict(
                    name="",
                    uri="",
                    note=""
                )
            ],
            contacts=[
                dict(
                    name="",
                    affiliation="",
                    uri="",
                    phone=""
                )
            ],
            scripts=[
                dict(
                    file_name="",
                    title="",
                    authors=[
                        dict(
                            name="",
                            abbr="",
                            role=""
                        )
                    ],
                    date="",
                    format="",
                    software="",
                    description="",
                    methods="",
                    dependencies="",
                    instructions="",
                    source_code_repo="",
                    notes=""
                )
            ]
        )
    )
    if cleanup:
        # Drop empty placeholders; integers (e.g. published=0) are preserved.
        tp_template = has_content(tp_template)
    return tp_template
```
# Get data from the database
```
# Sanity check: how many repos with a README match the phrase "economic"?
collection.count_documents({'$text': {'$search': '"economic"'}, 'readme': {'$exists': True}})
%%time
# Development-relevant search terms used to select repositories.
keywords = ['economic', 'nutrition', 'income inequality', 'agriculture', 'climate change', 'poverty', 'fragility', 'refugee']
payloads = []
for kw in keywords:
    # Exact-phrase $text search; only repos that actually have a README.
    for data in collection.find({'$text': {'$search': f'"{kw}"'}, 'readme': {'$exists': True}}):
        payloads.append(build_template_for_github(data, overwrite='yes', cleanup=True))
# Persist the generated metadata templates for later upload to the API.
with open('github_nada_data.json', 'w') as fl:
    json.dump(payloads, fl)
# climate_dataset = collection.find({'$text': {'$search': '"climate change"'}, 'readme': {'$exists': True}})
# poverty_dataset = collection.find({'$text': {'$search': '"poverty"'}, 'readme': {'$exists': True}})
# nutrition_dataset = collection.find({'$text': {'$search': '"nutrition"'}, 'readme': {'$exists': True}})
# refugee_dataset = collection.find({'$text': {'$search': '"refugee"'}, 'readme': {'$exists': True}})
# fragility_dataset = collection.find({'$text': {'$search': '"fragility"'}, 'readme': {'$exists': True}})
# agriculture_dataset = collection.find({'$text': {'$search': '"agriculture"'}, 'readme': {'$exists': True}})
# income_dataset = collection.find({'$text': {'$search': '"income inequality"'}, 'readme': {'$exists': True}})
# economics_dataset = collection.find({'$text': {'$search': '"economics"'}, 'readme': {'$exists': True}})
# data = collection.find_one({'primary_language': 'Python', '$text': {'$search': '"climate change"'}})
# data = collection.find_one({'primary_language': 'Python', '$text': {'$search': 'poverty'}})
# tp_template = build_template_for_github(data, cleanup=True)
# tp_template['doc_desc']
# template = {
# "repositoryid": 'central',
# "published": 1,
# "overwrite": "yes",
# "doc_desc": {
# # "title": "",
# "idno": "",
# "producers": [
# {
# "name": "GitHub Bot",
# "abbr": "",
# "affiliation": "",
# "role": "bot"
# }
# ],
# "prod_date": datetime.now().strftime('%d %B %Y'),
# "version": "1.0"
# },
# "project_desc": {
# "title_statement": {
# "idno": data['_id'].replace('/', '_'),
# "title": data.get('description', data.get('name', '')),
# "sub_title": "",
# "alternate_title": "",
# "translated_title": ""
# },
# "production_date": [
# datetime.now().strftime('%B %Y')
# ],
# "authoring_entity": [
# {
# "name": data['owner'],
# "role": "owner",
# "affiliation": data.get('homepage_url', ''),
# }
# ],
# "abstract": data.get('readme', data.get('description', data.get('name', ''))),
# "repository_uri": [
# {
# "name": data['_id'],
# "type": "GitHub",
# "uri": f"https://github.com/{data['_id']}"
# }
# ],
# "project_website": [
# data.get('homepage_url', '')
# ],
# "version_statement": {
# "version": "latest",
# "version_date": pd.to_datetime(data['repo_updated_at']).strftime('%d %B %Y'),
# "version_resp": "",
# "version_notes": "Latest update"
# },
# "language": get_language(data.get('readme', data.get('description', data.get('name', 'english')))),
# "software": [
# {
# "name": lang.get('node', {}).get('name', ''),
# "version": "",
# "library": (
# [] if lang.get('node', {}).get('name', '') not in ['R', 'Python'] else
# data.get('r_libs', []) if lang.get('node', {}).get('name', '') == 'R' else
# data.get('py_libs', []))
# } for lang in data.get('languages', [])
# ],
# "license": [
# {
# "name": data.get('license_info', ''),
# "uri": ""
# }
# ],
# }
# }
```
# Post to API
```
# Post the prepared template to the NADA dev API.
# NOTE(review): `data` is a raw Mongo document elsewhere in this notebook and would not
# normally carry a 'project_desc' key — this likely should read from `template`; confirm.
idno = data['project_desc']['title_statement']['idno'].replace('/', '_')
headers = {'X-API-KEY': '<API_KEY>'}  # placeholder — supply the real key via env/config, not source
api_url = 'http://dev.ihsn.org/nada/index.php/api/datasets/create/script/'
response = requests.post(api_url + idno, headers=headers, json=template)
# collection.find_one({'primary_language': 'R', '$text': {'$search': 'poverty'}})
# collection.find({'$text': {'$search': "\"climate change\""}})
# data = {
# "_id": "00tau/skyline-addon-easyqc",
# "description": "Add-on script for performing easy quality control tasks within Skyline",
# "fork_count": 0,
# "insertion_date": "2019-11-24T04:36:07.963844+00:00",
# "languages": [
# {
# "node": {
# "name": "R"
# }
# }
# ],
# "last_updated_date": "2019-11-24T04:36:07.963844+00:00",
# "license_info": "GNU General Public License v3.0",
# "name": "skyline-addon-easyqc",
# "owner": "00tau",
# "primary_language": "R",
# "py_libs": [],
# "r_libs": [
# "chron",
# "ggplot2",
# "plyr"
# ],
# "readme": "# Start using easyQC for statistical process and quality control in mass spectrometry workflows\n\n## Introduction\n\nThe program `easyQC` is an external tool for statistical process and quality\ncontrol in mass spectrometry workflows that integrates nicely in the [Skyline\nTargeted Proteomics\nEnvironment](https://skyline.gs.washington.edu/labkey/project/home/software/Skyline/begin.view).\n\n## Feature list at a glance\n\n- Automatically sorts your data by date and time, and orders your observations\n with the most recent on the right. (\"What? Does this mean I don't need to\n sort my data manually, as it is the case for some other software tools out\n there?\", \"Yes.\")\n- Dynamically adapts to custom report templates. (See details below.)\n- Flow charts for single peptides can optionally be grouped together by their\n common protein accession. (See details below.)\n- Plots are generated in a nice page layout, ready for printing.\n- Observations are colour-coded by a beneficial four-colour-code. This makes\n it particularly easy to detect deviations from the norm.\n- Has a built in outlier detection, which provides you with useful robust\n features. (See details below.)\n- Plot as _many_ flow charts for as _many_ peptides as you like.\n\n## How to cite this software\n\nThe [Harvard UoB format]\n(http://lrweb.beds.ac.uk/guides/a-guide-to-referencing/cite_computer_program)\nsuggests to cite this software in the following fashion:\n\n Möbius, T.W. and Malchow, S. (2014) easyQC: Statistical Process and Quality\n Control in Mass Spectrometry Workflows (Version 1.0) [Computer program].\n Available at: http://00tau.github.io/skyline-addon-easyqc/ (Accessed 03.\n April, 2014)\n\nThank you for using (and citing) this software.\n\n## Installation using the skyline GUI\n\nSimply follow the GUI-clicking adventure by successively clicking on `Tools ->\nExternal Tools -> External Tool Store`. In the appearing list select (click\non) `easyQC`. 
You will be promoted for the path to `Rscript`, which needs to\nbe installed on you system.\n\nWe have realised that since the introduction of \"Live Reports\" in new Versions\nof Skyline, the import of new templates might fail. If this is the case for\nyou, make sure two switch off \"Live Reports\", restart Skyline, and try the\ninstallation again.\n\nThe underlying code-base of `easyQC` relies on the R-packages\n[ggplot2](http://ggplot2.org/), [plyr](http://plyr.had.co.nz/) and\n[chron](http://cran.r-project.org/web/packages/chron/index.html). Fortunately,\nall these packages are hosted on [CRAN](http://cran.r-project.org/), and should\nautomatically be installed into your R-environment, when installing `easyQC` in\nSkyline. If, for some reasons, this should not be the case for you, make sure\nthese three packages are installed in your R-environment.\n\n## Description\n\nThe software comes with an exemplary report template called `easyQC`. We\nrecommend to just go with this template, but feel free to create your own. The\nabsolute necessary fields your template should contain are:\n`PeptideModifiedSequence` and `PrecursorMz`. These two fields are used as\nidentifiers for your peptides, and, thus, all other fields should uniquely be\nidentifiable by these two. Optionally, the field `ProteinName` can be added to\nyour template.\n\nBy default, the flow charts of ten peptides are grouped together into one plot\neach. If your report template also contains the associated protein accession\nof each peptide, namely the field `ProteinName`, then all peptides which belong\nto the same protein accession are grouped into one plot.\n\nBefore the calculation of the mean and standard deviations of each flow chart,\nthe software will do some outlier detection of your data, namely [Grubbs' test\nfor outliers](http://en.wikipedia.org/wiki/Grubbs%27_test_for_outliers) will be\napplied. 
Observations which are classified as outliers by this test are\ndiscarded in the estimation of the mean and standard deviations. This gives\nthe estimated means and standard deviations some desirable\n[robust](http://en.wikipedia.org/wiki/Robust_statistics) features.\n\n## You can also use easyQC as a stand-alone command line program\n\nOn Linux, you simply need to add the directory in which you have cloned\n`easyQC`'s repository to your path. Also make sure that `easyQC.r` is\nexecutable.\n\n```\n% git clone https://github.com/00tau/skyline-addon-easyqc.git\n% cd skyline-addonn-easyqc\n% chmod +x easyQC.r\n% PATH=$(pwd):$PATH\n```\n\nThe synopsis is as follows:\n\n```\neasyQC.r [OPTIONS] REPORTFILE\n```\n\nWhere `OPTIONS` is either `verbose` or noting. For example, to produce some\nquality control plots from a file `some-report-file.csv` that has been\ngenerated by Skyline via some report template (e.g. the template `easyQC.skyr`\nshould come in mind here), run either one of the following two code lines from\nthe command line.\n\n```\n% easyQC.r some-report-file.csv\n% easyQC.r verbose some-report-file.csv\n```\n\nThis will produce a file `some-report-file.pdf` with all the plots you need.\n\nYou what to install the most recent and latest version in Skyline\n-----------------------------------------------------------------\n\nIf for some reasons, you are interested in installing the latest GitHub-version\n(or any other version of this software that is available on GitHub), the\nrepository contains a convenient Makefile that will create the necessary files\nfor the installation process for you. Simply type:\n\n```\n% make\n```\n\nThis will create a `easyQC.zip` file which contains the needed install scripts\nfor Skyline. Now, just follow your Skyline-GUI.\n\nAuthors\n-------\n\nThomas W. D. Möbius (Maintainer, R-programming), Sebastian Malchow (Skyline wizard)\n",
# "repo_created_at": "2014-02-25T15:26:30Z",
# "repo_id": "MDEwOlJlcG9zaXRvcnkxNzE3NzYxOQ==",
# "repo_updated_at": "2014-04-04T14:56:54Z",
# "stargazers": 0,
# "topics": [],
# "watchers": 1
# }
# collection.create_index([('readme', pymongo.TEXT)], name='readme_text_idx')
# collection.create_index([('description', pymongo.TEXT)], name='description_text_idx')
# template = {
# "repositoryid": 'central',
# "published": 1,
# "overwrite": "yes",
# "doc_desc": {
# # "title": "",
# "idno": "",
# "producers": [
# {
# "name": "GitHub Bot",
# "abbr": "",
# "affiliation": "",
# "role": "bot"
# }
# ],
# "prod_date": datetime.now().strftime('%B %Y'),
# "version": ""
# },
# "project_desc": {
# "title_statement": {
# "idno": data['_id'].replace('/', '_'),
# "title": data.get('description', data.get('name', '')),
# "sub_title": "",
# "alternate_title": "",
# "translated_title": ""
# },
# "production_date": [
# datetime.now().strftime('%B %Y')
# ],
# # "geographic_units": [
# # {
# # "name": "",
# # "code": "",
# # "type": ""
# # }
# # ],
# "authoring_entity": [
# {
# "name": data['owner'],
# "role": "owner",
# "affiliation": data.get('homepage_url', ''),
# # "abbreviation": null,
# # "email": null
# }
# ],
# # "contributors": [
# # {
# # "name": "string",
# # "role": "string",
# # "affiliation": "string",
# # "abbreviation": null,
# # "email": null,
# # "url": null
# # }
# # ],
# # "curators": [
# # {
# # "name": "string",
# # "role": "string",
# # "affiliation": "string",
# # "abbreviation": null,
# # "email": null,
# # "url": null
# # }
# # ],
# "abstract": data.get('readme', data.get('description', data.get('name', ''))),
# # "keywords": [
# # {
# # "name": "string",
# # "vocabulary": "string",
# # "uri": "string"
# # }
# # ],
# # "themes": [
# # {
# # "name": "string",
# # "vocabulary": "string",
# # "uri": "string"
# # }
# # ],
# # "topics": [
# # {
# # "id": "string",
# # "name": "string",
# # "parent_id": "string",
# # "vocabulary": "string",
# # "uri": "string"
# # }
# # ],
# # "disciplines": [
# # {
# # "name": "string",
# # "vocabulary": "string",
# # "uri": "string"
# # }
# # ],
# # "output_types": [
# # {
# # "type": "string",
# # "description": "string",
# # "uri": "string",
# # "doi": "string"
# # }
# # ],
# "repository_uri": [
# {
# "name": data['_id'],
# "type": "GitHub",
# "uri": f"https://github.com/{data['_id']}"
# }
# ],
# "project_website": [
# data.get('homepage_url', '')
# ],
# "version_statement": {
# "version": "latest",
# "version_date": pd.to_datetime(data['repo_updated_at']).strftime('%d %B %Y'),
# "version_resp": "",
# "version_notes": "Latest update"
# },
# "language": get_language(data.get('readme', data.get('description', data.get('name', 'english')))),
# # "methods": [
# # {
# # "name": "string",
# # "note": "string"
# # }
# # ],
# "software": [
# {
# "name": lang.get('node', {}).get('name', ''),
# "version": "",
# "library": (
# [] if lang.get('node', {}).get('name', '') not in ['R', 'Python'] else
# data.get('r_libs', []) if lang.get('node', {}).get('name', '') == 'R' else
# data.get('py_libs', []))
# } for lang in data.get('languages', [])
# ],
# # "technology_environment": "string",
# # "technology_requirements": "string",
# # "reproduction_instructions": "string",
# "license": [
# {
# "name": data.get('license_info', ''),
# "uri": ""
# }
# ],
# # "review_process": [
# # {
# # "submission_date": "string",
# # "reviewer": "string",
# # "review_status": "string",
# # "approval_authority": "string",
# # "approval_date": "string"
# # }
# # ],
# # "disclaimer": "string",
# # "confidentiality": "string",
# # "citation_requirement": "string",
# # "datasets": [
# # {
# # "name": "string",
# # "idno": "string",
# # "note": "string",
# # "access_type": "string",
# # "uri": "string"
# # }
# # ],
# # "sponsors": [
# # {
# # "name": "string",
# # "abbr": "string",
# # "role": "string",
# # "grant_no": "string"
# # }
# # ],
# # "acknowledgements": [
# # {
# # "name": "string",
# # "affiliation": "string",
# # "role": "string"
# # }
# # ],
# # "related_projects": [
# # {
# # "name": "string",
# # "uri": "string",
# # "note": "string"
# # }
# # ],
# # "contacts": [
# # {
# # "name": "string",
# # "affiliation": "string",
# # "uri": "string",
# # "phone": "string"
# # }
# # ],
# # "scripts": [
# # {
# # "file_name": "string",
# # "title": "string",
# # "authors": [
# # {
# # "name": "string",
# # "abbr": "string",
# # "role": "string"
# # }
# # ],
# # "date": "string",
# # "format": "string",
# # "software": "string",
# # "description": "string",
# # "methods": "string",
# # "dependencies": "string",
# # "instructions": "string",
# # "source_code_repo": "string",
# # "notes": "string"
# # }
# # ]
# }
# }
```
| github_jupyter |
# CDR3 entropy in shared and unshared clonotypes
Starting with unique cross-subject clonotype datasets, computes per-position entropy for CDR3s in unshared or shared (found in at least 6 of 10 samples) clonotypes.
The following Python packages are required:
* numpy
* pandas
and can be installed by running `pip install numpy pandas`
```
from __future__ import print_function
from collections import Counter
import os
import subprocess as sp
import sys
import numpy as np
import pandas as pd
```
## Get sequences
The raw dataset (unique cross-subject clonotypes) is too large to be included in this Github repo. Instead, a compressed archive containing all of the required data can be downloaded [**HERE**](http://burtonlab.s3.amazonaws.com/GRP_github_data/dedup_10-subject_pools.tar.gz). Decompressing the archive in the `./data` directory will allow the following code blocks to run without modification.
***NOTE:*** *The required data files are relatively large (~30GB in total), so ensure adequate storage space is available before downloading.*
```
def get_sequences(seq_files):
    """
    Read CDR3 amino-acid sequences from whitespace-delimited count files.

    seq_files maps a sequence-pool name (e.g. 'observed') to a file whose rows
    look like: <subject_count> <...> <...> <aa_sequence>.  Sequences are
    bucketed as 'unshared' (count == 1) or 'shared' (count in 6..10) and by
    CDR3 length (7..14); everything else is skipped.

    Returns (all_seqs, selected_seqs): selected_seqs randomly downsamples the
    larger of the shared/unshared pools at each length so both are equal-sized.
    """
    all_seqs = {}
    for seq_type, seq_file in seq_files.items():
        seqs = {'shared': {i: [] for i in range(7, 15)},
                'unshared': {i: [] for i in range(7, 15)}}
        with open(seq_file) as f:
            for line in f:
                sline = line.strip().split()
                if not sline:
                    continue
                try:
                    c = int(sline[0])
                    if c == 1:
                        s = 'unshared'
                    elif c in range(6, 11):
                        s = 'shared'
                    else:
                        continue
                    aa = sline[3]
                    l = len(aa)
                    if l not in range(7, 15):
                        continue
                    seqs[s][l].append(aa)
                # IndexError: short/malformed row; ValueError: non-numeric
                # count column (e.g. a header line) — skip both.
                except (IndexError, ValueError):
                    continue
        all_seqs[seq_type] = seqs
    # downselect sequences so that shared and unshared pools are the same size
    selected_seqs = {t: {'shared': {}, 'unshared': {}} for t in all_seqs.keys()}
    # Iterate over whatever pools were actually provided instead of a
    # hard-coded name list, so the function works for any seq_files mapping.
    for seq_type in all_seqs.keys():
        for length in range(7, 15):
            num_seqs = min([len(all_seqs[seq_type][t][length]) for t in ['shared', 'unshared']])
            for shared_type in ['shared', 'unshared']:
                s = all_seqs[seq_type][shared_type][length]
                if len(s) > num_seqs:
                    s = np.random.choice(s, size=num_seqs, replace=False)
                selected_seqs[seq_type][shared_type][length] = s
    return all_seqs, selected_seqs
# Input pools: observed cross-subject clonotypes and synthetic sequences from
# subject-specific models (see the download note above — not checked into the repo).
files = {'observed': './data/dedup_10-subject_pools/10-subject_dedup_pool_with-counts.txt',
         'subject-specific synthetic': './data/dedup_10-subject_pools/10-sample_dedup_pool_synthetic_subject-specific-models_with-counts.txt'}
all_seqs, seq_dict = get_sequences(files)
```
## Compute shared/unshared CDR3 entropy
```
def calculate_entropies(seq_dict, seq_type):
    """Per-position Shannon entropies for one sequence pool.

    seq_dict maps 'shared'/'unshared' -> {CDR3 length -> list of sequences}.
    Positions are taken column-wise across equal-length sequences; the first
    and last three positions are excluded.  Returns a list of record dicts
    ready for a DataFrame.
    """
    records = []
    for shared_status, by_length in seq_dict.items():
        for length, seqs in by_length.items():
            # zip(*seqs) turns the sequences into per-position residue columns.
            for column in list(zip(*seqs))[3:-3]:
                records.append({
                    'sample': '{} ({})'.format(seq_type, shared_status),
                    'seq_type': seq_type,
                    'Shannon entropy': entropy(column),
                    'CDR3 length': length,
                    'shared': shared_status,
                })
    return records
def entropy(residues):
    """
    Normalized Shannon entropy of one residue column.

    Normalization divides by log(n_classes), so a uniform column scores 1.0
    and a single-class column scores 0.0.
    """
    n_residues = len(residues)
    if n_residues <= 1:
        return 0.
    # Counter.values() is a lazy view in Python 3: np.asarray on it builds a
    # useless 0-d object array.  Materialize it first (the original code only
    # worked under Python 2, where values() returned a list).
    counts = np.asarray(list(Counter(residues).values()), dtype=np.float64)
    probs = counts[np.nonzero(counts)] / n_residues
    n_classes = len(probs)
    if n_classes <= 1:
        return 0.
    return - np.sum(probs * np.log(probs)) / np.log(n_classes)
# Compute per-position entropies for both pools and persist them as CSV
# for downstream plotting.
entropy_data = []
print('Getting sequences...')
for seq_type in seq_dict.keys():
    print(seq_type)
    entropies = calculate_entropies(seq_dict[seq_type], seq_type)
    entropy_data += entropies
entropy_df = pd.DataFrame(entropy_data)
entropy_df.to_csv('./data/per-position_shannon_entropies.csv')
```
## Shared CDR3 sequence properties
```
# NOTE(review): `seqs` is not defined at this notebook scope, and the seqs dicts built
# earlier are keyed by CDR3 length (7-14), not by sharing count (6-10).  This cell
# likely predates a refactor — confirm the intended source before running.
shared_seqs = []
for n in range(6, 11):
    shared_seqs += seqs[n]
```
| github_jupyter |
```
#from preprocess import *
#standard module
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# import sklearn
from sklearn import linear_model
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn import neighbors
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from scipy.spatial.distance import squareform
from scipy.stats import rankdata
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.neural_network import MLPRegressor
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 2.5})
import sys
sys.path.append("../../tools/")
from preprocess import *
```
## Simple machine learning model
```
#load data
# Load the pre-shuffled 15x15-grid MD dataset (rows = designs, columns = features + responses).
alldata_15G=np.loadtxt('../../mddata/15grid_shuffled.dat')
alldata = alldata_15G
```
Linear Regression
```
def linear_models_with_regularizations(X_train, X_test, y_train, y_test, alpha_ridge, alpha_lasso):
    """
    Fit and compare OLS, Ridge and LASSO on the same train/test split.

    Prints MSE, RMSE and r2 for each model, shows a predicted-vs-true scatter
    plot for the OLS fit, and returns the three prediction vectors plus the
    three coefficient vectors.

    Parameters
    --------------
    X_train, X_test: numpy matrix
        feature matrices (typically polynomial-expanded)
    y_train, y_test: numpy array
        target values (yield stress/strain)
    alpha_ridge: float
        L2 regularization strength for Ridge
    alpha_lasso: float
        L1 regularization strength for LASSO

    Return
    --------------
    (y_pred_regr, y_pred_ridge, y_pred_lasso, coef_regr, coef_ridge, coef_lasso)
    """
    # Optional log-transform of the targets; permanently disabled (flag hard-coded False).
    logTrans = False
    if logTrans is True:
        y_test = np.log(y_test)
        y_train = np.log(y_train)
    # --- Ordinary least squares ---
    regr = linear_model.LinearRegression()
    regr.fit(X_train, y_train)
    y_pred_regr = regr.predict(X_test)
    #accuracy_score(Y_test, Y_pred)
    # The coefficients
    #print('Coefficients: \n', regr.coef_)
    print("Mean squared error Linear Regression: %.2f" % mean_squared_error(y_test, y_pred_regr))
    # Explained variance score: 1 is perfect prediction
    #ac1 = r2_score(y_test, y_pred)
    print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_regr))/len(y_test)))
    print('r2_score: %.2f' % r2_score(y_test, y_pred_regr))
    # Diagonal reference line (perfect prediction) plus OLS predicted-vs-true scatter.
    ysorted= np.sort(y_test)
    xx = np.linspace(ysorted[0], ysorted[-1], len(ysorted))
    plt.plot(xx, xx, 'r')
    plt.plot(y_pred_regr, y_test, 'bo', alpha=0.5)
    plt.xlabel('Predicted yield stress') #change the name here stress/strain
    plt.ylabel('True yield stress')
    plt.title('OLS with polynomial degree=2')
    #plt.ylim(0, 1.2)
    #plt.xlim(0, 1.2)
    #plt.show()
    #yy = y_test.reshape((len(y_test), 1))
    plt.show()
    # --- Ridge (L2) regression ---
    ridge = linear_model.Ridge(alpha=alpha_ridge)
    ridge.fit(X_train, y_train)
    y_pred_ridge=ridge.predict(X_test)
    #accuracy_score(Y_test, Y_pred)
    # The coefficients
    #print('Coefficients: \n', clf.coef_)
    print("Mean squared error Ridge Regression: %.2f" % mean_squared_error(y_test, y_pred_ridge))
    # Explained variance score: 1 is perfect prediction
    print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_ridge))/len(y_test)))
    print('r2_score: %.2f' % r2_score(y_test, y_pred_ridge))
    #ac_ridge = r2_score(y_test, y_pred)
    #plt.plot(y_pred, y_test, 'bo', alpha=0.5)
    #plt.xlabel('y_test (fracture strain)')
    #plt.ylabel('y_pred (fracture strain)')
    #plt.title('Ridge Regression')
    # --- LASSO (L1) regression ---
    lasso = linear_model.Lasso(alpha=alpha_lasso)
    lasso.fit(X_train, y_train)
    y_pred_lasso=lasso.predict(X_test)
    #accuracy_score(Y_test, Y_pred)
    # The coefficients
    #print('Coefficients: \n', clf.coef_)
    print("Mean squared error LASSO: %.2f" % mean_squared_error(y_test, y_pred_lasso))
    # Explained variance score: 1 is perfect prediction
    print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_lasso))/len(y_test)))
    print('r2_score: %.2f' % r2_score(y_test, y_pred_lasso))
    #ac_lasso = r2_score(y_test, y_pred)
    #plt.plot(y_test, y_pred, 'o')
    #plt.xlabel('y_test (fracture strain)')
    #plt.ylabel('y_pred (fracture strain)')
    #plt.title('LASSO Regression')
    #plt.show()
    return y_pred_regr, y_pred_ridge, y_pred_lasso, regr.coef_, ridge.coef_, lasso.coef_
```
## Training
You can choose how many polynomial features to use for training.
```
# --- Run 1: objective column 2, degree-2 interaction-only features ---
#split data into training and test set
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": .5})
#np.random.shuffle(alldata)
x, y=create_matrix(alldata, False, 2, 0.3, 15)
# Rescale binary features from {0, 1} to {-1, 1}.
x = (x-.5)*2
X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
#choose polynomial degrees
poly = PolynomialFeatures(2, interaction_only=True, include_bias=True)
#poly = PolynomialFeatures(2)
X_train2 = poly.fit_transform(X_train)
print("Number of features: %d" %len(X_train2[0]))
# NOTE(review): fit_transform on the test set re-fits the expander; harmless for a
# fixed-degree PolynomialFeatures but transform() would be the conventional call.
X_test2 = poly.fit_transform(X_test)
#linear_models(X_train2, X_test2, y_train, y_test, ridge=True)
#y_train = (y_train-0.45937603178269587)/0.22056868516982353
#y_test = (y_test-0.45937603178269587)/0.22056868516982353
# `alpha` is assigned but unused — the regularization strengths are passed as literals (10, 0.1).
alpha=0.1
y_pred_regr, y_pred_ridge, y_pred_lasso, coef_regr, coef_ridge, coef_lasso = linear_models_with_regularizations(X_train2, X_test2, y_train, y_test, 10, 0.1)
# --- Run 2: objective column 0, degree-3 interaction-only features ---
#split data into training and test set
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": .5})
#np.random.shuffle(alldata)
x, y=create_matrix(alldata, False, 0, 0.3, 15)
x = (x-.5)*2
X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
#choose polynomial degrees
poly = PolynomialFeatures(3, interaction_only=True, include_bias=True)
#poly = PolynomialFeatures(2)
X_train2 = poly.fit_transform(X_train)
print("Number of features: %d" %len(X_train2[0]))
X_test2 = poly.fit_transform(X_test)
#linear_models(X_train2, X_test2, y_train, y_test, ridge=True)
#y_train = (y_train-0.45937603178269587)/0.22056868516982353
#y_test = (y_test-0.45937603178269587)/0.22056868516982353
alpha=0.1
y_pred_regr, y_pred_ridge, y_pred_lasso, coef_regr, coef_ridge, coef_lasso = linear_models_with_regularizations(X_train2, X_test2, y_train, y_test, 10, 0.1)
def NN_regressor(alldata, hl, obj, transform):
    """
    Train an MLP regressor on the grid dataset and plot predicted vs. true values.

    Parameters
    --------------
    alldata: numpy matrix
        full dataset; NOTE: shuffled in place (side effect on the caller's array)
    hl: tuple
        hidden_layer_sizes for MLPRegressor, e.g. (1024,)
    obj: int
        objective column selector forwarded to create_matrix
    transform: bool
        if True, expand features with degree-2 interaction-only terms

    Return
    --------------
    (first hidden-layer size, test RMSE, test r2_score, y_test, y_pred)
    """
    nn_regr = MLPRegressor(solver='lbfgs', alpha=1e-2, hidden_layer_sizes=hl, activation='relu', random_state=1)
    #sorted_data = alldata[alldata[:,15].argsort()] #index 18 prob bad design, small -> goode design
    np.random.shuffle(alldata)
    #0nly fit top 20%
    #sorted_data = sorted_data[int(0.8*len(sorted_data)):]
    #np.random.shuffle(sorted_data)
    #cutoff = sorted_data[int(len(alldata)/2), 17]
    #x, y=create_matrix(sorted_data, True, 2, 30, NCcell_x*NCcell_y)
    x, y=create_matrix(alldata, False, obj, 0.375, 15)
    X_train, X_valid, X_test, y_train, y_valid, y_test = split_data(x, y, 0.8, 0.2)
    #poly = PolynomialFeatures(1, interaction_only=True, include_bias=False)
    #poly = PolynomialFeatures(interaction_only=True)
    #X_train2 = X_train
    #poly.fit_transform(X_train)
    #x2 = poly.fit_transform(x)
    #print("Number of features: %d" %len(X_train2[0]))
    #X_test2 = poly.fit_transform(X_test)
    # Optional polynomial feature expansion; otherwise use raw features.
    if (transform is True):
        poly = PolynomialFeatures(2, interaction_only=True, include_bias=False)
        #poly = PolynomialFeatures(interaction_only=True)
        X_train2 = poly.fit_transform(X_train)
        #x2 = poly.fit_transform(x)
        #print("Number of features: %d" %len(X_train2[0]))
        X_test2 = poly.fit_transform(X_test)
    else:
        X_train2 = X_train
        X_test2 = X_test
    nn_regr.fit(X_train2, y_train)
    y_pred_nn= nn_regr.predict(X_test2)
    # Diagonal reference line (perfect prediction) plus predicted-vs-true scatter.
    ysorted= np.sort(y_test)
    xx = np.linspace(ysorted[0], ysorted[-1], len(ysorted))
    plt.plot(xx, xx, 'r')
    plt.plot(y_pred_nn, y_test, 'bo', alpha=0.5)
    plt.xlabel('Predicted yield stress')
    # NOTE(review): ylabel says 'strain' while xlabel says 'stress' — one label looks stale; confirm.
    plt.ylabel('True yield strain')
    plt.title('Neural Network')
    print("Mean squared error: %lf" % mean_squared_error(y_test, y_pred_nn))
    print("RMSE: %lf" %np.sqrt(np.sum(np.square(y_test-y_pred_nn))/len(y_test)))
    # Explained variance score: 1 is perfect prediction
    print('r2_score: %.2f' % r2_score(y_test, y_pred_nn))
    return hl[0], np.sqrt(np.sum(np.square(y_test-y_pred_nn))/len(y_test)), r2_score(y_test, y_pred_nn), y_test, y_pred_nn
# Train a single-hidden-layer (1024-unit) MLP on objective 0 without feature expansion.
hl, rmse, ac, y_test, y_pred=NN_regressor(alldata, (1024, ), 0, False)
```
| github_jupyter |
## Calculate null distribution from median Cell Painting scores with same sample size as L1000
Code modified from @adeboyeML
```
import os
import pathlib
import pandas as pd
import numpy as np
from collections import defaultdict
from pycytominer import feature_select
from statistics import median
import random
from scipy import stats
import pickle
import warnings
# Silence noisy pandas/numpy deprecation chatter; fix the RNG seed for reproducibility.
warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE(review): np.warnings is a deprecated alias of the warnings module — confirm on newer NumPy.
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
np.random.seed(42)
# Load common compounds
common_file = pathlib.Path(
    "..", "..", "..", "6.paper_figures", "data", "significant_compounds_by_threshold_both_assays.tsv.gz"
)
common_df = pd.read_csv(common_file, sep="\t")
common_compounds = common_df.compound.unique()
print(len(common_compounds))
# Level-4 Cell Painting replicate profiles (subsampled to match L1000 sample size).
cp_level4_path = "cellpainting_lvl4_cpd_replicate_datasets"
df_level4 = pd.read_csv(os.path.join(cp_level4_path, 'cp_level4_cpd_replicates_subsample.csv.gz'),
                        compression='gzip',low_memory = False)
print(df_level4.shape)
df_level4.head()
# Per-compound median replicate-correlation scores, indexed by compound name.
df_cpd_med_scores = pd.read_csv(os.path.join(cp_level4_path, 'cpd_replicate_median_scores_subsample.csv'))
df_cpd_med_scores = df_cpd_med_scores.set_index('cpd').rename_axis(None, axis=0).copy()
# Subset to common compound measurements
df_cpd_med_scores = df_cpd_med_scores.loc[df_cpd_med_scores.index.isin(common_compounds), :]
print(df_cpd_med_scores.shape)
df_cpd_med_scores.head()
def get_cpds_replicates(df, df_lvl4):
    """
    Collect replicate names for every compound at each dose (doses 1-6).

    Returns (replicates_in_all, cpds_replicates): the first is one flat list
    of replicate names per dose; the second maps each compound (df.index) to
    its per-dose lists of replicate names.
    """
    # NOTE(review): relies on set iteration order so that [1:7] drops dose 0 — confirm.
    dose_list = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]
    replicates_in_all = []
    cpds_replicates = {}
    for dose in dose_list:
        dose_slice = df_lvl4[df_lvl4['Metadata_dose_recode'] == dose].copy()
        pooled = []
        for cpd in df.index:
            names = dose_slice.loc[dose_slice['pert_iname'] == cpd, 'replicate_name'].values.tolist()
            pooled.extend(names)
            cpds_replicates.setdefault(cpd, []).append(names)
        replicates_in_all.append(pooled)
    return replicates_in_all, cpds_replicates
replicates_in_all, cpds_replicates = get_cpds_replicates(df_cpd_med_scores, df_level4)
def get_replicates_classes_per_dose(df, df_lvl4, cpds_replicates):
    """
    This function gets all replicates ids for each distinct
    no_of_replicates (i.e. number of replicates per cpd) class per dose (1-6)

    Returns replicate_class_dict dictionary, with no_of_replicate classes as the keys,
    and all the replicate_ids for each no_of_replicate class as the values
    """
    # Side effect: adds a 'replicate_id' column to *df*.  Relies on the dict
    # insertion order of cpds_replicates matching df.index order — TODO confirm.
    df['replicate_id'] = list(cpds_replicates.values())
    # NOTE(review): assumes set iteration yields doses sorted so [1:7] drops dose 0 — confirm.
    dose_list = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]
    replicate_class_dict = {}
    for dose in dose_list:
        for size in df['no_of_replicates'].unique():
            rep_lists = []
            for idx in range(df[df['no_of_replicates'] == size].shape[0]):
                # replicate_id holds one list per dose, in dose order -> dose-1 indexes it.
                rep_ids = df[df['no_of_replicates'] == size]['replicate_id'].values.tolist()[idx][dose-1]
                rep_lists += rep_ids
            if size not in replicate_class_dict:
                replicate_class_dict[size] = [rep_lists]
            else:
                replicate_class_dict[size] += [rep_lists]
    return replicate_class_dict
# Pool replicate ids by replicate-count class, per dose.
cpd_replicate_class_dict = get_replicates_classes_per_dose(df_cpd_med_scores, df_level4, cpds_replicates)
cpd_replicate_class_dict.keys()
def check_similar_replicates(replicates, dose, cpd_dict):
    """Return True when any two entries of *replicates* belong to the same compound at this dose."""
    n_reps = len(replicates)
    for i in range(n_reps - 1):
        for j in range(i + 1, n_reps):
            first, second = replicates[i], replicates[j]
            # A pair is "similar" when some compound's dose list holds both members.
            for cpd_reps in cpd_dict.values():
                if first in cpd_reps[dose - 1] and second in cpd_reps[dose - 1]:
                    return True
    return False
def get_random_replicates(all_replicates, no_of_replicates, dose, replicates_ids, cpd_replicate_dict):
    """
    Return a list of randomly sampled replicates that are neither of the same
    compound nor present in the current compound's replicate-id list.

    Resamples until a valid combination is found.
    """
    while True:
        random_replicates = random.sample(all_replicates, no_of_replicates)
        # Reject the sample if EITHER condition holds.  The original code joined the
        # two checks with bitwise `&`, which accepted samples overlapping
        # replicates_ids whenever they were not also same-compound pairs —
        # contradicting the documented contract.
        if not (any(rep in replicates_ids for rep in random_replicates) or
                check_similar_replicates(random_replicates, dose, cpd_replicate_dict)):
            break
    return random_replicates
def get_null_distribution_replicates(
    cpd_replicate_class_dict,
    dose_list,
    replicates_lists,
    cpd_replicate_dict,
    rand_num = 1000
):
    """
    This function returns a null distribution dictionary, with no_of_replicates(replicate class)
    as the keys and 1000 lists of randomly selected replicate combinations as the values
    for each no_of_replicates class per DOSE(1-6)

    rand_num controls how many random combinations are drawn per class per dose.
    """
    # Fixed seed so the null distribution is reproducible across runs.
    random.seed(1903)
    null_distribution_reps = {}
    for dose in dose_list:
        for replicate_class in cpd_replicate_class_dict:
            replicates_ids = cpd_replicate_class_dict[replicate_class][dose-1]
            replicate_list = []
            for idx in range(rand_num):
                # Resample until the combination is new for this class/dose,
                # guaranteeing rand_num distinct combinations.
                start_again = True
                while (start_again):
                    rand_cpds = get_random_replicates(replicates_lists[dose-1], replicate_class, dose,
                                                      replicates_ids, cpd_replicate_dict)
                    if rand_cpds not in replicate_list:
                        start_again = False
                replicate_list.append(rand_cpds)
            if replicate_class not in null_distribution_reps:
                null_distribution_reps[replicate_class] = [replicate_list]
            else:
                null_distribution_reps[replicate_class] += [replicate_list]
    return null_distribution_reps
len(cpds_replicates.keys())
# Doses 1-6 (dose 0 dropped by the [1:7] slice); draw 1000 random combinations
# per replicate-count class and dose.
dose_list = list(set(df_level4['Metadata_dose_recode'].unique().tolist()))[1:7]
null_distribution_replicates = get_null_distribution_replicates(
    cpd_replicate_class_dict, dose_list, replicates_in_all, cpds_replicates
)
def save_to_pickle(null_distribution, path, file_name):
    """
    Serialize *null_distribution* to <path>/<file_name> as a pickle file.

    Creates *path* (including any missing parent directories) if needed.
    """
    # makedirs with exist_ok is race-safe and handles nested paths;
    # the previous os.mkdir failed on both.
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, file_name), 'wb') as handle:
        pickle.dump(null_distribution, handle, protocol=pickle.HIGHEST_PROTOCOL)
#save the null_distribution_moa to pickle
save_to_pickle(null_distribution_replicates, cp_level4_path, 'null_distribution_subsample.pickle')
##load the null_distribution_moa from pickle
# Immediately reload so downstream cells use exactly what was persisted.
with open(os.path.join(cp_level4_path, 'null_distribution_subsample.pickle'), 'rb') as handle:
    null_distribution_replicates = pickle.load(handle)
def assert_null_distribution(null_distribution_reps, dose_list):
    """
    Sanity-check that each dose's 1000 random replicate combinations are distinct.

    Returns a dict keyed by replicate-count class whose values list any
    duplicated combinations; an empty dict means no duplicates anywhere.
    """
    duplicates_reps = {}
    for dose in dose_list:
        for size_class, per_dose_lists in null_distribution_reps.items():
            combos = per_dose_lists[dose - 1]
            for combo in combos:
                # A combo is duplicated when removing all equal entries drops
                # more than just itself.
                remaining = [c for c in combos if c != combo]
                if len(remaining) != len(combos) - 1:
                    duplicates_reps.setdefault(size_class, []).append([combo])
    return duplicates_reps
# Verify the null draws contain no duplicated combinations (expect an empty dict).
duplicate_replicates = assert_null_distribution(null_distribution_replicates, dose_list)
duplicate_replicates ##no duplicates
def calc_null_dist_median_scores(df, dose_num, replicate_lists):
    """
    This function calculate the median of the correlation
    values for each list in the 1000 lists of random replicate
    combination for each no_of_replicate class per dose

    Returns a list of median pairwise-Pearson correlations, one per combination.
    """
    df_dose = df[df['Metadata_dose_recode'] == dose_num].copy()
    df_dose = df_dose.set_index('replicate_name').rename_axis(None, axis=0)
    # Drop metadata columns so only feature columns enter the correlation.
    df_dose.drop(['Metadata_broad_sample', 'Metadata_pert_id', 'Metadata_dose_recode',
                  'Metadata_Plate', 'Metadata_Well', 'Metadata_broad_id', 'Metadata_moa',
                  'broad_id', 'pert_iname', 'moa'],
                 axis = 1, inplace = True)
    median_corr_list = []
    for rep_list in replicate_lists:
        df_reps = df_dose.loc[rep_list].copy()
        # Pairwise Pearson correlation between replicate profiles (rows).
        reps_corr = df_reps.astype('float64').T.corr(method = 'pearson').values
        # Median over the strictly-upper-triangular entries (unique pairs only).
        median_corr_val = median(list(reps_corr[np.triu_indices(len(reps_corr), k = 1)]))
        median_corr_list.append(median_corr_val)
    return median_corr_list
def get_null_dist_median_scores(null_distribution_cpds, dose_list, df):
    """
    Median correlation scores for all 1000 random replicate combinations of
    every no_of_replicates class, across all doses (1-6).

    Returns {replicate_class: [per-dose lists of median scores]}.
    """
    null_distribution_medians = {}
    for replicate_class, per_dose_combos in null_distribution_cpds.items():
        per_dose_scores = [
            calc_null_dist_median_scores(df, dose, per_dose_combos[dose - 1])
            for dose in dose_list
        ]
        null_distribution_medians[replicate_class] = per_dose_scores
    return null_distribution_medians
null_distribution_medians = get_null_dist_median_scores(null_distribution_replicates, dose_list, df_level4)
def compute_dose_median_scores(null_dist_medians, dose_list):
    """
    Re-group median null-distribution scores by dose.

    Parameters
    ----------
    null_dist_medians : dict
        Maps each no_of_replicates class to a list (one entry per dose)
        of median correlation scores.
    dose_list : list of int
        Dose recodes (e.g. 1-6).

    Returns
    -------
    dict
        Maps each dose number to the concatenated median null
        distribution / non-replicate correlation scores of all
        no_of_replicates classes for that dose.
    """
    median_scores_per_dose = {}
    for dose in dose_list:
        median_list = []
        # Bug fix: iterate over the parameter, not the module-level
        # `null_distribution_medians` global the original body relied on.
        for replicate_class in null_dist_medians:
            median_list += null_dist_medians[replicate_class][dose - 1]
        median_scores_per_dose[dose] = median_list
    return median_scores_per_dose
# Align the null-distribution medians by dose and persist them for reuse.
dose_null_medians = compute_dose_median_scores(null_distribution_medians, dose_list)
#save the null_distribution_medians_per_dose to pickle
save_to_pickle(dose_null_medians, cp_level4_path, 'null_dist_medians_per_dose_subsample.pickle')
def get_p_value(median_scores_list, df, dose_name, cpd_name):
    """
    Compute a non-parametric p-value for one compound at one dose: the
    fraction of null-distribution median scores greater than or equal to
    the compound's observed median replicate correlation.

    Parameters
    ----------
    median_scores_list : sequence of float
        Null-distribution median correlation scores.
    df : pandas.DataFrame
        Median score table indexed by compound, one column per dose.
    dose_name : str
        Column name to look up, e.g. 'dose_1'.
    cpd_name : str
        Compound (row index) to look up.

    Returns
    -------
    float
        Empirical p-value in [0, 1].
    """
    actual_med = df.loc[cpd_name, dose_name]
    # np.asarray makes the >= comparison element-wise even when the null
    # scores arrive as a plain Python list (list >= float raises TypeError).
    null_scores = np.asarray(median_scores_list)
    p_value = np.sum(null_scores >= actual_med) / len(median_scores_list)
    return p_value
def get_moa_p_vals(null_dist_median, dose_list, df_med_values):
    """
    Compute per-dose p-values for every compound.

    Returns a dict, sorted by compound name, mapping each compound to a
    list of p-values — one per dose in ``dose_list``.
    """
    p_values_by_cpd = {}
    for replicate_class, dose_scores in null_dist_median.items():
        # Compounds belonging to this no_of_replicates class.
        class_df = df_med_values[df_med_values['no_of_replicates'] == replicate_class]
        for compound in class_df.index:
            p_values_by_cpd[compound] = [
                get_p_value(dose_scores[dose - 1], class_df, 'dose_' + str(dose), compound)
                for dose in dose_list
            ]
    # Return entries ordered alphabetically by compound name.
    return dict(sorted(p_values_by_cpd.items(), key=lambda item: item[0]))
# Compute per-dose p-values for each compound and assemble them into a
# DataFrame (rows = compounds, columns = dose_1..dose_6).
null_p_vals = get_moa_p_vals(null_distribution_medians, dose_list, df_cpd_med_scores)
df_null_p_vals = pd.DataFrame.from_dict(null_p_vals, orient='index', columns = ['dose_' + str(x) for x in dose_list])
# Carry over each compound's replicate-class annotation.
df_null_p_vals['no_of_replicates'] = df_cpd_med_scores['no_of_replicates']
df_null_p_vals.head(10)
def save_to_csv(df, path, file_name):
    """
    Write ``df`` to ``path/file_name`` as CSV (without the index),
    creating the target directory first if needed.
    """
    # makedirs (vs. mkdir) also creates missing parent directories and,
    # with exist_ok=True, avoids a race if the directory appears meanwhile.
    os.makedirs(path, exist_ok=True)
    df.to_csv(os.path.join(path, file_name), index = False)
# Persist the wide per-compound p-value table.
save_to_csv(df_null_p_vals.reset_index().rename({'index':'cpd'}, axis = 1), cp_level4_path,
'cpd_replicate_p_values_subsample.csv')
cpd_summary_file = pathlib.Path(cp_level4_path, 'cpd_replicate_p_values_melted_subsample.csv')
# Dose recode -> concentration label used in the tidy summary below.
dose_recode_info = {
    'dose_1': '0.04 uM', 'dose_2':'0.12 uM', 'dose_3':'0.37 uM',
    'dose_4': '1.11 uM', 'dose_5':'3.33 uM', 'dose_6':'10 uM'
}
# Melt the p values
# Long format: one row per (compound, dose) with its p-value.
cpd_score_summary_pval_df = (
    df_null_p_vals
    .reset_index()
    .rename(columns={"index": "compound"})
    .melt(
        id_vars=["compound", "no_of_replicates"],
        value_vars=["dose_1", "dose_2", "dose_3", "dose_4", "dose_5", "dose_6"],
        var_name="dose",
        value_name="p_value"
    )
)
cpd_score_summary_pval_df.dose = cpd_score_summary_pval_df.dose.replace(dose_recode_info)
# Melt the median matching scores
# Long format: one row per (compound, dose) with its median replicate correlation.
cpd_score_summary_df = (
    df_cpd_med_scores
    .reset_index()
    .rename(columns={"index": "compound"})
    .melt(
        id_vars=["compound", "no_of_replicates"],
        value_vars=["dose_1", "dose_2", "dose_3", "dose_4", "dose_5", "dose_6"],
        var_name="dose",
        value_name="matching_score"
    )
)
cpd_score_summary_df.dose = cpd_score_summary_df.dose.replace(dose_recode_info)
# Join p-values with matching scores and tag the experimental context.
summary_df = (
    cpd_score_summary_pval_df
    .merge(cpd_score_summary_df, on=["compound", "no_of_replicates", "dose"], how="inner")
    .assign(
        assay="Cell Painting",
        normalization="spherized",
        category="subsampled"
    )
)
# Write the tidy summary as TSV.
summary_df.to_csv(cpd_summary_file, sep="\t", index=False)
print(summary_df.shape)
summary_df.head()
```
| github_jupyter |
<table width="100%">
<tr style="border-bottom:solid 2pt #009EE3">
<td class="header_buttons">
<a href="generation_of_time_axis.zip" download><img src="../../images/icons/download.png" alt="biosignalsnotebooks | download button"></a>
</td>
<td class="header_buttons">
<a href="https://mybinder.org/v2/gh/biosignalsplux/biosignalsnotebooks/mybinder_complete?filepath=biosignalsnotebooks_environment%2Fcategories%2FPre-Process%2Fgeneration_of_time_axis.ipynb" target="_blank"><img src="../../images/icons/program.png" alt="biosignalsnotebooks | binder server" title="Be creative and test your solutions !"></a>
</td>
<td></td>
<td class="header_icons">
<a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png" alt="biosignalsnotebooks | home button"></a>
</td>
<td class="header_icons">
<a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png" alt="biosignalsnotebooks | contacts button"></a>
</td>
<td class="header_icons">
<a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png" alt="biosignalsnotebooks | github button"></a>
</td>
<td class="header_logo">
<img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo">
</td>
</tr>
</table>
<link rel="stylesheet" href="../../styles/theme_style.css">
<!--link rel="stylesheet" href="../../styles/header_style.css"-->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<table width="100%">
<tr>
<td id="image_td" width="15%" class="header_image_color_4"><div id="image_img" class="header_image_4"></div></td>
<td class="header_text"> Generation of a time axis (conversion of samples into seconds) </td>
</tr>
</table>
<div id="flex-container">
<div id="diff_level" class="flex-item">
<strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
<span class="fa fa-star checked"></span>
<span class="fa fa-star"></span>
<span class="fa fa-star"></span>
<span class="fa fa-star"></span>
</div>
<div id="tag" class="flex-item-tag">
<span id="tag_list">
<table id="tag_list_table">
<tr>
<td class="shield_left">Tags</td>
<td class="shield_right" id="tags">pre-process☁time☁conversion</td>
</tr>
</table>
</span>
<!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
</div>
</div>
All electrophysiological signals, collected by PLUX acquisition systems, are, in its essence, time series.
Raw data contained in the generated .txt, .h5 and .edf files consists of samples, and each sample is a raw 8- or 16-bit value that needs to be converted to a physical unit by the respective transfer function.
PLUX has examples of conversion rules for each sensor (in separate .pdf files), which may be accessed at <a href="http://biosignalsplux.com/en/learn/documentation">"Documentation>>Sensors" section <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> of <strong><span class="color2">biosignalsplux</span></strong> website.
<img src="../../images/pre-process/generation_of_time_axis/sensors_section.gif">
Although each file returned by <strong><span class="color2">OpenSignals</span></strong> contains a sequence number linked to each sample, giving a notion of "time order" and that can be used as x axis, working with real time units is, in many occasions, more intuitive.
So, the present **<span class="color5">Jupyter Notebook</span>** describes how to associate a time axis to an acquired signal, taking into consideration the number of acquired samples and the respective sampling rate.
<hr>
<p class="steps">1 - Importation of the needed packages </p>
```
# Package dedicated to download files remotely
from wget import download
# Package used for loading data from the input text file and for generation of a time axis
from numpy import loadtxt, linspace
# Package used for loading data from the input h5 file
import h5py
# biosignalsnotebooks own package.
import biosignalsnotebooks as bsnb
```
<p class="steps"> A - Text Files</p>
<p class="steps">A1 - Load of support data inside .txt file (described in a <span class="color5">Jupyter Notebook</span> entitled <a href="../Load/open_txt.ipynb"><strong> "Load acquired data from .txt file" <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) </p>
```
# Download of the text file followed by content loading.
txt_file_url = "https://drive.google.com/uc?export=download&id=1m7E7PnKLfcd4HtOASH6vRmyBbCmIEkLf"
txt_file = download(txt_file_url, out="download_file_name.txt")
txt_file = open(txt_file, "r")
# [Internal code for overwrite file if already exists]
import os
import shutil
txt_file.close()
if os.path.exists("download_file_name.txt"):
shutil.move(txt_file.name,"download_file_name.txt")
txt_file = "download_file_name.txt"
txt_file = open(txt_file, "r")
```
<p class="steps">A2 - Load of acquisition samples (in this case from the third column of the text file - list entry 2)</p>
```
txt_signal = loadtxt(txt_file)[:, 2]
```
<p class="steps">A3 - Determination of the number of acquired samples</p>
```
# Number of acquired samples
nbr_samples_txt = len(txt_signal)
from sty import fg, rs
print(fg(98,195,238) + "\033[1mNumber of samples (.txt file):\033[0m" + fg.rs + " " + str(nbr_samples_txt))
```
<p class="steps"> B - H5 Files</p>
<p class="steps">B1 - Load of support data inside .h5 file (described in the <span class="color5">Jupyter Notebook</span> entitled <a href="../Load/open_h5.ipynb"><strong> "Load acquired data from .h5 file"<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) </p>
```
# Download of the .h5 file followed by content loading.
h5_file_url = "https://drive.google.com/uc?export=download&id=1UgOKuOMvHTm3LlQ_e7b6R_qZL5cdL4Rv"
h5_file = download(h5_file_url, out="download_file_name.h5")
h5_object = h5py.File(h5_file)
# [Internal code for overwrite file if already exists]
import os
import shutil
h5_object.close()
if os.path.exists("download_file_name.h5"):
shutil.move(h5_file,"download_file_name.h5")
h5_file = "download_file_name.h5"
h5_object = h5py.File(h5_file)
```
<p class="steps">B2 - Load of acquisition samples inside .h5 file</p>
```
# Device mac-address.
mac_address = list(h5_object.keys())[0]
# Access to signal data acquired by the device identified by "mac_address" in "channel_1".
h5_signal = list(h5_object.get(mac_address).get("raw").get("channel_1"))
```
<p class="steps">B3 - Determination of the number of acquired samples</p>
```
# Number of acquired samples
nbr_samples_h5 = len(h5_signal)
print(fg(232,77,14) + "\033[1mNumber of samples (.h5 file):\033[0m" + fg.rs + " " + str(nbr_samples_h5))
```
As it can be seen, the number of samples is equal for both file types.
```
print(fg(98,195,238) + "\033[1mNumber of samples (.txt file):\033[0m" + fg.rs + " " + str(nbr_samples_txt))
print(fg(232,77,14) + "\033[1mNumber of samples (.h5 file):\033[0m" + fg.rs + " " + str(nbr_samples_h5))
```
So, we can simplify and reduce the number of variables:
```
nbr_samples = nbr_samples_txt
```
Like described in the Notebook intro, for generating a time-axis it is needed the <strong><span class="color4">number of acquired samples</span></strong> and the <strong><span class="color7">sampling rate</span></strong>.
Currently the only unknown parameter is the <strong><span class="color7">sampling rate</span></strong>, which can be easily accessed for .txt and .h5 files as described in <a href="../Load/signal_loading_preparatory_steps.ipynb" target="_blank">"Signal Loading - Working with File Header"<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>.
For our acquisition the sampling rate is:
```
sampling_rate = 1000 # Hz
```
<p class="steps">AB4 - Determination of acquisition time in seconds</p>
```
# Conversion between sample number and seconds
acq_time = nbr_samples / sampling_rate
print ("Acquisition Time: " + str(acq_time) + " s")
```
<p class="steps">AB5 - Creation of the time axis (between 0 and 417.15 seconds) through <span class="color4">linspace</span> function</p>
```
time_axis = linspace(0, acq_time, nbr_samples)
print ("Time-Axis: \n" + str(time_axis))
```
<p class="steps">AB6 - Plot of the acquired signal (first 10 seconds) with the generated time-axis</p>
```
bsnb.plot(time_axis[:10*sampling_rate], txt_signal[:10*sampling_rate])
```
*This procedure can be automatically done by **generate_time** function in **conversion** module of **<span class="color2">biosignalsnotebooks</span>** package*
```
time_axis_auto = bsnb.generate_time(h5_file_url)
from numpy import array
print ("Time-Axis returned by generateTime function:")
print (array(time_axis_auto))
```
Time is a really important "dimension" in our daily lives and particularly on signal processing analysis. Without a time "anchor" like <strong><span class="color7">sampling rate</span></strong> it is very difficult to link the acquired digital data with real events.
Concepts like "temporal duration" or "time rate" become meaningless, being more difficult to take adequate conclusions.
However, as can be seen, a researcher in possession of the data to process and a single parameter (sampling rate) can easily generate a time-axis, following the demonstrated procedure.
<strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
<hr>
<table width="100%">
<tr>
<td class="footer_logo">
<img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo [footer]">
</td>
<td width="40%" style="text-align:left">
<a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
<br>
<a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
<br>
<a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/signal_samples.ipynb">☌ Signal Library</a>
</td>
<td width="40%" style="text-align:left">
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
</td>
</tr>
</table>
```
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
```
# Unsupervised Learning Part 2 -- Clustering
Clustering is the task of gathering samples into groups of similar
samples according to some predefined similarity or distance (dissimilarity)
measure, such as the Euclidean distance.
<img width="60%" src='figures/clustering.png'/>
In this section we will explore a basic clustering task on some synthetic and real-world datasets.
Here are some common applications of clustering algorithms:
- Compression for data reduction
- Summarizing data as a reprocessing step for recommender systems
- Similarly:
- grouping related web news (e.g. Google News) and web search results
- grouping related stock quotes for investment portfolio management
- building customer profiles for market analysis
- Building a code book of prototype samples for unsupervised feature extraction
Let's start by creating a simple, 2-dimensional, synthetic dataset:
```
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
X.shape
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1])
```
In the scatter plot above, we can see three separate groups of data points and we would like to recover them using clustering -- think of "discovering" the class labels that we already take for granted in a classification task.
Even if the groups are obvious in the data, it is hard to find them when the data lives in a high-dimensional space, which we can't visualize in a single histogram or scatterplot.
Now we will use one of the simplest clustering algorithms, K-means.
This is an iterative algorithm which searches for three cluster
centers such that the distance from each point to its cluster is
minimized. The standard implementation of K-means uses the Euclidean distance, which is why we want to make sure that all our variables are measured on the same scale if we are working with real-world datasets. In the previous notebook, we talked about one technique to achieve this, namely, standardization.
<br/>
<div class="alert alert-success">
<b>Question</b>:
<ul>
<li>
what would you expect the output to look like?
</li>
</ul>
</div>
```
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=42)
```
We can get the cluster labels either by calling fit and then accessing the
``labels_`` attribute of the K means estimator, or by calling ``fit_predict``.
Either way, the result contains the ID of the cluster that each point is assigned to.
```
labels = kmeans.fit_predict(X)
labels
np.all(y == labels)
```
Let's visualize the assignments that have been found
```
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=labels)
```
Compared to the true labels:
```
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=y)
```
Here, we are probably satisfied with the clustering results. But in general we might want to have a more quantitative evaluation. How about comparing our cluster labels with the ground truth we got when generating the blobs?
```
from sklearn.metrics import confusion_matrix, accuracy_score
print('Accuracy score:', accuracy_score(y, labels))
print(confusion_matrix(y, labels))
np.mean(y == labels)
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
After looking at the "True" label array y, and the scatterplot and `labels` above, can you figure out why our computed accuracy is 0.0, not 1.0, and can you fix it?
</li>
</ul>
</div>
Even though we recovered the partitioning of the data into clusters perfectly, the cluster IDs we assigned were arbitrary,
and we can not hope to recover them. Therefore, we must use a different scoring metric, such as ``adjusted_rand_score``, which is invariant to permutations of the labels:
```
from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y, labels)
```
One of the "short-comings" of K-means is that we have to specify the number of clusters, which we often don't know *apriori*. For example, let's have a look what happens if we set the number of clusters to 2 in our synthetic 3-blob dataset:
```
kmeans = KMeans(n_clusters=2, random_state=42)
labels = kmeans.fit_predict(X)
plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1], c=labels)
kmeans.cluster_centers_
```
#### The Elbow Method
The Elbow method is a "rule-of-thumb" approach to finding the optimal number of clusters. Here, we look at the cluster dispersion for different values of k:
```
# Elbow method: fit K-means for k = 1..10 and record the within-cluster
# sum of squares (inertia); the "elbow" of the curve suggests a good k.
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i,
                random_state=0)
    km.fit(X)
    distortions.append(km.inertia_)
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
```
Then, we pick the value that resembles the "pit of an elbow." As we can see, this would be k=3 in this case, which makes sense given our earlier visual inspection of the dataset.
**Clustering comes with assumptions**: A clustering algorithm finds clusters by making assumptions about how samples should be grouped together. Each algorithm makes different assumptions, and the quality and interpretability of your results will depend on whether the assumptions are satisfied for your goal. For K-means clustering, the model is that all clusters have equal, spherical variance.
**In general, there is no guarantee that structure found by a clustering algorithm has anything to do with what you were interested in**.
We can easily create a dataset that has non-isotropic clusters, on which kmeans will fail:
```
# Demonstrate K-means failure modes on synthetic data: wrong k,
# anisotropic clusters, unequal variance, and unevenly sized clusters.
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
                random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
```
## Some Notable Clustering Routines
The following are two well-known clustering algorithms.
- `sklearn.cluster.KMeans`: <br/>
The simplest, yet effective clustering algorithm. Needs to be provided with the
number of clusters in advance, and assumes that the data is normalized as input
(but use a PCA model as preprocessor).
- `sklearn.cluster.MeanShift`: <br/>
Can find better looking clusters than KMeans but is not scalable to high number of samples.
- `sklearn.cluster.DBSCAN`: <br/>
Can detect irregularly shaped clusters based on density, i.e. sparse regions in
the input space are likely to become inter-cluster boundaries. Can also detect
outliers (samples that are not part of a cluster).
- `sklearn.cluster.AffinityPropagation`: <br/>
Clustering algorithm based on message passing between data points.
- `sklearn.cluster.SpectralClustering`: <br/>
KMeans applied to a projection of the normalized graph Laplacian: finds
normalized graph cuts if the affinity matrix is interpreted as an adjacency matrix of a graph.
- `sklearn.cluster.Ward`: <br/>
Ward implements hierarchical clustering based on the Ward algorithm,
a variance-minimizing approach. At each step, it minimizes the sum of
squared differences within all clusters (inertia criterion).
Of these, Ward, SpectralClustering, DBSCAN and Affinity propagation can also work with precomputed similarity matrices.
<img src="figures/cluster_comparison.png" width="900">
<div class="alert alert-success">
<b>EXERCISE: digits clustering</b>:
<ul>
<li>
Perform K-means clustering on the digits data, searching for ten clusters.
Visualize the cluster centers as images (i.e. reshape each to 8x8 and use
``plt.imshow``) Do the clusters seem to be correlated with particular digits? What is the ``adjusted_rand_score``?
</li>
<li>
Visualize the projected digits as in the last notebook, but this time use the
cluster labels as the color. What do you notice?
</li>
</ul>
</div>
```
from sklearn.datasets import load_digits
digits = load_digits()
# ...
# %load solutions/08B_digits_clustering.py
```
| github_jupyter |
# Clustering Sprint Challenge
Objectives:
* Describe two clustering algorithms
* Create k clusters with the k-Means algorithm
* Compare/contrast the performance of the two algorithms on two datasets
### 1. Describe two different clustering algorithms
There are many clustering algorithms with profoundly different implementations. Their objective is the same - to identify groups in unlabeled data.
Fill out the below python objects.
```
# Clustering algorithm 1:
algorithm_one_name = "K Means"
algorithm_one_description = "K centroids are initialized, randomly or through sampling \
\nThen loop through the following 2 steps: \
\n1. Each point is assigned to the nearest centroid \
\n2. New centroids are calculated by taking the means of the assigned points \
\nClusters found minimize within-cluster sum of squares, or 'inertia' \
\nWorks best when clusters are convex and isotropic\n"
# Clustering algorithm 2:
algorithm_two_name = "Spectral Clustering"
algorithm_two_description = "An affinity matrix is first computed \
\nIt contains some sort of pairwise distance/similarity measure \
\nThe matrix is then factored through eigendecomposition \
\nThe eigenvectors corresponding to the lowest nonzero eigenvalues are then selected \
\nTogether, they make up a lower dimensional feature space \
\nThe data is projected onto the lower dimension, and K Means is performed \
\nOther standard clustering algorithms are also acceptable \
\nUseful when clusters are non-convex"
print(algorithm_one_name)
print(algorithm_one_description)
print(algorithm_two_name)
print(algorithm_two_description)
```
### 2. Create k clusters with k-Means algorithm
```
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, SpectralClustering
# Dataset
set1 = pd.read_csv('https://www.dropbox.com/s/zakq7e0r8n1tob9/clustering_set1.csv?raw=1', index_col=0)
set1.head()
plt.scatter(set1['x'], set1['y']);
```
There appear to be 2 clusters.
```
# Create kmeans object
model = KMeans(n_clusters=2)
# Fit kmeans object to data.
# DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy() instead.
model.fit(set1.to_numpy())
# Print location of clusters learned by kmeans object
centroids = model.cluster_centers_
print('Cluster Centroids:\n' + str(centroids))
# Overlay the learned centroids (red dots) on the raw data.
plt.scatter(set1['x'], set1['y'])
plt.plot(centroids[:,0], centroids[:,1], 'ro');
```
### 3. Compare/contrast the performance of your two algorithms with two datasets
```
# Second dataset
set2 = pd.read_csv('https://www.dropbox.com/s/zakq7e0r8n1tob9/clustering_set2.csv?raw=1', index_col=0)
set2.head()
plt.scatter(set2['x'], set2['y']);
```
The data seems to be the same as in part 1.
The clusters are mostly convex, meaning that given two points in the cluster, the points on the line connecting them are likely to also be in the cluster. They are also isotropic (the same in any direction), since they cover about 8 units of distance in both the x and y directions, and appear circular.
Because of this, I expect K means to perform well. Spectral clustering should also perform well, but won't be too useful, especially given that the clusters are linearly separable in the first place. In fact, because it discards information during the projection onto a lower dimension, it may even perform worse.
```
n_clusters=2
model1 = KMeans(n_clusters)
model2 = SpectralClustering(n_clusters)
# DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy() instead.
model1.fit(set2.to_numpy())
model2.fit(set2.to_numpy())
# K-means labels
plt.scatter(set2['x'], set2['y'], c=model1.labels_, cmap='coolwarm')
plt.title('K Means Clustering');
# Spectral-clustering labels, drawn over the same axes
plt.scatter(set2['x'], set2['y'], c=model2.labels_, cmap='coolwarm')
plt.title('Spectral Clustering');
```
Interestingly, Spectral Clustering labeled some of the outlying points as part of the wrong cluster. This may have something to do with the information lost when projecting onto a lower dimension. Aside from this, both algorithms performed similarly, as expected.
| github_jupyter |
## Installing dependencies
```
!pip install simpletransformers datasets tqdm pandas
```
##Loading data from huggingface(optional)
```
import pandas as pd
from datasets import load_dataset
from tqdm import tqdm
dataset = load_dataset('tapaco', 'en')
def process_tapaco_dataset(dataset, out_file):
    """
    Flatten the HuggingFace TaPaCo dataset into a DataFrame and write it
    to ``out_file`` as a tab-separated file (header row, index=None).

    Returns the resulting DataFrame.
    """
    # The dataset has only a train split.
    rows = [list(record.values()) for record in tqdm(dataset["train"])]
    tapaco_df = pd.DataFrame(
        data=rows,
        columns=[
            "language",
            "lists",
            "paraphrase",
            "paraphrase_set_id",
            "sentence_id",
            "tags",
        ],
    )
    tapaco_df.to_csv(out_file, sep="\t", index=None)
    return tapaco_df
tapaco_df = process_tapaco_dataset(dataset,"tapaco_huggingface.csv")
tapaco_df.head()
```
## Preprocessing TaPaCo for training(optional)
```
import pandas as pd
from tqdm import tqdm
tapaco_df = pd.read_csv("tapaco_huggingface.csv",sep="\t")
def generate_tapaco_paraphrase_dataset(dataset, out_file):
    """
    Build (Text, Paraphrase) pairs from the flattened TaPaCo table.

    Sentences sharing a ``paraphrase_set_id`` are paraphrases of each
    other; within each set, consecutive sentences are paired off
    (0 with 1, 2 with 3, ...), dropping the last sentence of odd-sized
    sets. The pairs are written to ``out_file`` as TSV and returned as a
    DataFrame with columns ['Text', 'Paraphrase'].
    """
    dataset_df = dataset[["paraphrase", "paraphrase_set_id"]]
    # Keep only paraphrase sets that contain more than one sentence.
    non_single_labels = (
        dataset_df["paraphrase_set_id"]
        .value_counts()[dataset_df["paraphrase_set_id"].value_counts() > 1]
        .index.tolist()
    )
    tapaco_df_sorted = dataset_df.loc[
        dataset_df["paraphrase_set_id"].isin(non_single_labels)
    ]
    tapaco_paraphrases_dataset = []
    for paraphrase_set_id in tqdm(tapaco_df_sorted["paraphrase_set_id"].unique()):
        id_wise_paraphrases = tapaco_df_sorted[
            tapaco_df_sorted["paraphrase_set_id"] == paraphrase_set_id
        ]
        # Truncate odd-sized sets so sentences pair off exhaustively.
        n_pairable = id_wise_paraphrases.shape[0] - (id_wise_paraphrases.shape[0] % 2)
        for ix in range(0, n_pairable, 2):
            current_phrase = id_wise_paraphrases.iloc[ix][0]
            # The original wrapped the lookup below in a single-iteration
            # loop (range(ix+1, ix+2)); it ran exactly once, so the dead
            # loop was removed without changing behavior.
            next_phrase = id_wise_paraphrases.iloc[ix + 1][0]
            tapaco_paraphrases_dataset.append([current_phrase, next_phrase])
    tapaco_paraphrases_dataset_df = pd.DataFrame(
        tapaco_paraphrases_dataset, columns=["Text", "Paraphrase"]
    )
    tapaco_paraphrases_dataset_df.to_csv(out_file, sep="\t", index=None)
    return tapaco_paraphrases_dataset_df
dataset_df = generate_tapaco_paraphrase_dataset(tapaco_df,"tapaco_paraphrases_dataset.csv")
dataset_df.head()
```
## Load already preprocessed version of TaPaCo
```
!wget https://github.com/hetpandya/paraphrase-datasets-pretrained-models/raw/main/datasets/tapaco/tapaco_paraphrases_dataset.csv
import pandas as pd
dataset_df = pd.read_csv("tapaco_paraphrases_dataset.csv",sep="\t")
```
##Model Training
```
from simpletransformers.t5 import T5Model
from sklearn.model_selection import train_test_split
import sklearn
# simpletransformers' T5 wrapper expects columns: prefix, input_text, target_text.
dataset_df.columns = ["input_text","target_text"]
dataset_df["prefix"] = "paraphrase"
# Hold out 10% of the paraphrase pairs for evaluation.
train_data,test_data = train_test_split(dataset_df,test_size=0.1)
train_data
test_data
# Training / generation hyperparameters passed to T5Model.
args = {
    "reprocess_input_data": True,
    "overwrite_output_dir": True,
    "max_seq_length": 256,
    "num_train_epochs": 4,
    # Sampling-based decoding (no beam search) with top-k/top-p filtering.
    "num_beams": None,
    "do_sample": True,
    "top_k": 50,
    "top_p": 0.95,
    "use_multiprocessing": False,
    "save_steps": -1,
    "save_eval_checkpoints": True,
    "evaluate_during_training": False,
    'adam_epsilon': 1e-08,
    'eval_batch_size': 6,
    'fp_16': False,
    # Effective batch size = train_batch_size * gradient_accumulation_steps.
    'gradient_accumulation_steps': 16,
    'learning_rate': 0.0003,
    'max_grad_norm': 1.0,
    'n_gpu': 1,
    'seed': 42,
    'train_batch_size': 6,
    'warmup_steps': 0,
    'weight_decay': 0.0
}
# Fine-tune t5-small on the paraphrase pairs.
model = T5Model("t5","t5-small", args=args)
model.train_model(train_data, eval_data=test_data, use_cuda=True,acc=sklearn.metrics.accuracy_score)
```
##Loading Trained Model & Prediction Using Trained Model
```
from simpletransformers.t5 import T5Model
from pprint import pprint
import os
root_dir = os.getcwd()
trained_model_path = os.path.join(root_dir,"outputs")
args = {
"overwrite_output_dir": True,
"max_seq_length": 256,
"max_length": 50,
"top_k": 50,
"top_p": 0.95,
"num_return_sequences": 5,
}
trained_model = T5Model("t5",trained_model_path,args=args)
prefix = "paraphrase"
pred = trained_model.predict([f"{prefix}: The house will be cleaned by me every Saturday."])
pprint(pred)
```
| github_jupyter |
```
import numpy as np
import h5py
import matplotlib.pyplot as plt
# Helper function to help read the h5 files.
def simple_read_data(fileName):
    """Read one experiment-result HDF5 file into a plain dict.

    Parameters
    ----------
    fileName : str
        Path to the file *without* the '.h5' extension.

    Returns
    -------
    dict with keys:
        'rs_glob_acc', 'rs_train_acc', 'rs_train_loss' : 1-D arrays
        'perUserAccs' : 3-D array [number of times, number of epochs, number of users]
    """
    print(fileName)
    # Fix: use a context manager so the file handle is always closed
    # (the original left every file open).
    with h5py.File('{}.h5'.format(fileName), 'r') as hf:
        results = {}
        results['rs_glob_acc'] = np.array(hf.get('rs_glob_acc')[:])
        results['rs_train_acc'] = np.array(hf.get('rs_train_acc')[:])
        results['rs_train_loss'] = np.array(hf.get('rs_train_loss')[:])
        # Copy into memory before the file closes.
        results['perUserAccs'] = np.array(hf.get('perUserAccs'))
    return results
# Define the global directory path holding the .h5 result files.
# NOTE(review): hard-coded absolute path; adjust per machine.
directoryPath = '/home/adgdri/pFedMe/results/'
```
### Datasplit-1
```
# Datasplit-1: average final global accuracy and per-user accuracies over runs.
fileNames = [
    # TODO: Enter the filenames of the experimental results over the different runs.
]
# Get the number of users (parsed from a "..._<N>u..." filename pattern).
# NOTE(review): raises IndexError until fileNames is filled in above.
numUsers = int(fileNames[0].split('u')[0].split('_')[-1])
avgPersAcc = []
perUserAcc = np.zeros((1, numUsers))
for fileName in fileNames:
    ob = simple_read_data(directoryPath + fileName)
    # Final-epoch global accuracy for this run.
    avgPersAcc.append( ob['rs_glob_acc'][-1] )
    # Take the per user accuracy from the last epoch.
    perUserAcc += ob['perUserAccs'][:,-1, :]
# Average out over the different runs.
perUserAcc /= len(fileNames)
print ('----------------------------------------')
print ('\n Average accuracies across all the users over different runs : %s' % avgPersAcc)
print ('\n Average accuracy across all the different runs : %f.' % np.mean(avgPersAcc) )
print ('\n Average per user accuracy averaged over different runs: \n %s.' % np.round(perUserAcc.T, 4))
print ('\n Per-user averaged accuracy: \n %f.' % np.mean(np.round(perUserAcc.T, 4)))
```
### Datasplit-2
```
# Datasplit-2: same aggregation as the other splits — average the final
# global accuracy and last-epoch per-user accuracies over several runs.
fileNames = [
    # TODO: Enter the filenames of the experimental results over the different runs.
]
# Get the number of users (parsed from a "..._<N>u..." filename pattern).
# NOTE(review): raises IndexError until fileNames is filled in above.
numUsers = int(fileNames[0].split('u')[0].split('_')[-1])
avgPersAcc = []
perUserAcc = np.zeros((1, numUsers))
for fileName in fileNames:
    ob = simple_read_data(directoryPath + fileName)
    # Final-epoch global accuracy for this run.
    avgPersAcc.append( ob['rs_glob_acc'][-1] )
    # Take the per user accuracy from the last epoch.
    perUserAcc += ob['perUserAccs'][:,-1, :]
# Average out over the different runs.
perUserAcc /= len(fileNames)
print ('----------------------------------------')
print ('\n Average accuracies across all the users over different runs : %s' % avgPersAcc)
print ('\n Average accuracy across all the different runs : %f.' % np.mean(avgPersAcc) )
print ('\n Average per user accuracy averaged over different runs: \n %s.' % np.round(perUserAcc.T, 4))
print ('\n Per-user averaged accuracy: \n %f.' % np.mean(np.round(perUserAcc.T, 4)))
```
### Datasplit-3
```
# Datasplit-3: same aggregation as the other splits — average the final
# global accuracy and last-epoch per-user accuracies over several runs.
fileNames = [
    # TODO: Enter the filenames of the experimental results over the different runs.
]
# Get the number of users (parsed from a "..._<N>u..." filename pattern).
# NOTE(review): raises IndexError until fileNames is filled in above.
numUsers = int(fileNames[0].split('u')[0].split('_')[-1])
avgPersAcc = []
perUserAcc = np.zeros((1, numUsers))
for fileName in fileNames:
    ob = simple_read_data(directoryPath + fileName)
    # Final-epoch global accuracy for this run.
    avgPersAcc.append( ob['rs_glob_acc'][-1] )
    # Take the per user accuracy from the last epoch.
    perUserAcc += ob['perUserAccs'][:,-1, :]
# Average out over the different runs.
perUserAcc /= len(fileNames)
print ('----------------------------------------')
print ('\n Average accuracies across all the users over different runs : %s' % avgPersAcc)
print ('\n Average accuracy across all the different runs : %f.' % np.mean(avgPersAcc) )
print ('\n Average per user accuracy averaged over different runs: \n %s.' % np.round(perUserAcc.T, 4))
print ('\n Per-user averaged accuracy: \n %f.' % np.mean(np.round(perUserAcc.T, 4)))
```
| github_jupyter |
```
import os
import sys
# Absolute paths to the thesis data directories; adjust per machine.
data_dir = "/home/ec2-user/pwp-summer-2019/master_thesis_nhh_2019/processed_data/"
raw_dir = "/home/ec2-user/pwp-summer-2019/master_thesis_nhh_2019/raw_data/"
import pandas as pd
import numpy as np
import random
import math
# Show up to 999 columns when displaying frames in the notebook.
pd.set_option('display.max_columns', 999)
```
### Function for splitting the data sets based on formation-distribution
```
# Inspired by: https://stackoverflow.com/questions/56872664/complex-dataset-split-stratifiedgroupshufflesplit
def StratifiedGroupShuffleSplit(
    df_main,
    train_proportion=0.6,
    val_proportion = 0.3,
    hparam_mse_wgt = 0.1,
    df_group="title",
    y_var="formation_2",
    norm_keys=['gr','tvd','rdep'],
    seed = 42
):
    """Split df_main into train/val/test so that whole groups (wells,
    `df_group`) stay together while the class distribution of `y_var`
    stays close to the full data set's.

    Greedy assignment: each group goes to the split whose combined loss
    (hparam_mse_wgt * class-distribution MSE + (1-hparam_mse_wgt) *
    size-shortfall term) improves most.

    Returns (df_train, df_val, df_test).

    NOTE(review): `norm_keys` is never used in the body — TODO confirm
    whether normalisation was meant to happen here. `DataFrame.append`
    is deprecated/removed in modern pandas (use pd.concat). The function
    also mutates df_main's index in place.
    """
    np.random.seed(seed) # Set seed
    df_main.index = range(len(df_main)) # Create unique index for each observation in order to reindex
    df_main = df_main.reindex(np.random.permutation(df_main.index)) # Shuffle dataset
    # Create empty train, val and test datasets
    df_train = pd.DataFrame()
    df_val = pd.DataFrame()
    df_test = pd.DataFrame()
    hparam_mse_wgt = hparam_mse_wgt # Must be between 0 and 1
    assert(0 <= hparam_mse_wgt <= 1)
    train_proportion = train_proportion # Must be between 0 and 1
    assert(0 <= train_proportion <= 1)
    val_proportion = val_proportion # Must be between 0 and 1
    assert(0 <= val_proportion <= 1)
    test_proportion = 1-train_proportion-val_proportion # Remaining in test proportion
    assert(0 <= test_proportion <= 1)
    # Group the data set
    subject_grouped_df_main = df_main.groupby([df_group], sort=False, as_index=False)
    # Find the proportion of the total for each category
    category_grouped_df_main = df_main.groupby(y_var).count()[[df_group]]/len(df_main)*100
    # Function for calculating the MSE between a split's class distribution
    # and the full data set's class distribution.
    def calc_mse_loss(df):
        # Find the proportion of the total for each category in the specific data set
        grouped_df = df.groupby(y_var).count()[[df_group]]/len(df)*100
        # Merge the data set above with the original proportion for each category
        df_temp = category_grouped_df_main.join(grouped_df, on = y_var, how = 'left', lsuffix = '_main')
        # Fill NA
        df_temp.fillna(0, inplace=True)
        # Square the difference
        df_temp['diff'] = (df_temp[df_group+'_main'] - df_temp[df_group])**2
        # Mean of the squared difference
        mse_loss = np.mean(df_temp['diff'])
        return mse_loss
    # Initialize the train/val/test set:
    # the first three wells are assigned to train/val/test respectively.
    i = 0
    for well, group in subject_grouped_df_main:
        group = group.sort_index()
        if (i < 3):
            if (i == 0):
                df_train = df_train.append(pd.DataFrame(group), ignore_index=True)
                i += 1
                continue
            elif (i == 1):
                df_val = df_val.append(pd.DataFrame(group), ignore_index=True)
                i += 1
                continue
            else:
                df_test = df_test.append(pd.DataFrame(group), ignore_index=True)
                i += 1
                continue
        # Calculate the MSE improvement from adding this group to each split.
        mse_loss_diff_train = calc_mse_loss(df_train) - calc_mse_loss(df_train.append(pd.DataFrame(group),
                                                                                     ignore_index=True))
        mse_loss_diff_val = calc_mse_loss(df_val) - calc_mse_loss(df_val.append(pd.DataFrame(group),
                                                                               ignore_index=True))
        mse_loss_diff_test = calc_mse_loss(df_test) - calc_mse_loss(df_test.append(pd.DataFrame(group),
                                                                                  ignore_index=True))
        # Calculate the total number of wells assigned so far.
        total_records = df_train.title.nunique() + df_val.title.nunique() + df_test.title.nunique()
        # Calculate how far each split is from its target proportion.
        len_diff_train = (train_proportion - (df_train.title.nunique()/total_records))
        len_diff_val = (val_proportion - (df_val.title.nunique()/total_records))
        len_diff_test = (test_proportion - (df_test.title.nunique()/total_records))
        # Signed square keeps the sign (over- vs. under-filled split).
        len_loss_diff_train = len_diff_train * abs(len_diff_train)
        len_loss_diff_val = len_diff_val * abs(len_diff_val)
        len_loss_diff_test = len_diff_test * abs(len_diff_test)
        # Combined loss: weighted class-distribution + size terms.
        loss_train = (hparam_mse_wgt * mse_loss_diff_train) + ((1-hparam_mse_wgt) * len_loss_diff_train)
        loss_val = (hparam_mse_wgt * mse_loss_diff_val) + ((1-hparam_mse_wgt) * len_loss_diff_val)
        loss_test = (hparam_mse_wgt * mse_loss_diff_test) + ((1-hparam_mse_wgt) * len_loss_diff_test)
        # Assign the group to the split with the largest combined loss.
        if (max(loss_train,loss_val,loss_test) == loss_train):
            df_train = df_train.append(pd.DataFrame(group), ignore_index=True)
        elif (max(loss_train,loss_val,loss_test) == loss_val):
            df_val = df_val.append(pd.DataFrame(group), ignore_index=True)
        else:
            df_test = df_test.append(pd.DataFrame(group), ignore_index=True)
        i += 1
    return df_train, df_val, df_test
```
### Function for setting up the LSTM data set
```
# Inspired by:
# https://github.com/blasscoc/LinkedInArticles/blob/master/WellFaciesLSTM/LSTM%20Facies%20Competition.ipynb
from sklearn.preprocessing import OneHotEncoder
def chunk(x, y, num_chunks, size=61, random=True):
    """Cut fixed-length windows out of a 2-D log array.

    Each window is ``size`` rows of ``x`` centred on an index c, paired
    with the label ``y[c]`` at the window centre.

    When ``random`` is True, ``num_chunks`` centres are drawn uniformly;
    otherwise every valid centre is used in order (``num_chunks`` ignored).

    Returns (windows, labels) with windows shaped (k, size, x.shape[1]).
    """
    half = size // 2
    span = x.shape[0] - size
    if random:
        # num_chunks window centres drawn uniformly at random.
        centres = np.int_(np.random.rand(num_chunks) * span) + half
    else:
        # Every valid centre, in order.
        centres = np.arange(0, span, 1) + half
    windows = np.array([x[c - half:c + half + 1, :] for c in centres])
    labels = np.array([y[c] for c in centres])
    return windows, labels
def _num_pad(size, batch_size):
return (batch_size - np.mod(size, batch_size))
def setup_lstm_stratify(df,
                        df_group='title',
                        batch_size=128,
                        wvars=['gr','tvd','rdep'],
                        y_var = 'formation',
                        win=9,
                        n_val=39
                        ):
    """Turn a well-log frame into LSTM-ready (x, y) arrays.

    Per well (group): slide a window of ``win`` rows over the columns in
    ``wvars`` and take the centre-row label from ``y_var``. Labels are
    one-hot encoded over ``n_val`` classes, x is transposed to
    (samples, features, win), and both are edge-padded up to a multiple
    of ``batch_size``.

    NOTE(review): mutable default for ``wvars``; it is only read, so this
    is harmless but unconventional. ``OneHotEncoder(sparse=...)`` is
    deprecated in scikit-learn >= 1.2 (renamed ``sparse_output``).
    """
    df = df.fillna(0)
    df_grouped = df.groupby([df_group], sort=False, as_index=False)
    df_x = []
    df_y = []
    for key,val in df_grouped:
        val = val.copy()
        _x = val[wvars].values
        _y = val[y_var].values
        # random=False -> every window; the 400 is ignored in that mode.
        __x, __y = chunk(_x, _y, 400, size=win, random=False)
        df_x.extend(__x)
        df_y.extend(__y)
    df_x = np.array(df_x)
    df_y = np.array(df_y)
    # One-hot encode the labels over a fixed category range [0, n_val).
    enc = OneHotEncoder(sparse=False, categories=[range(n_val)])
    df_y = enc.fit_transform(np.atleast_2d(df_y).T)
    # (samples, win, features) -> (samples, features, win).
    df_x = df_x.transpose(0,2,1)
    # Pad to a multiple of the batch size by repeating the edge rows.
    num_pad = _num_pad(df_x.shape[0], batch_size)
    df_x = np.pad(df_x, ((0,num_pad),(0,0),(0,0)), mode='edge')
    df_y = np.pad(df_y, ((0,num_pad), (0,0)), mode='edge')
    return df_x, df_y
```
### Data generator for feeding the LSTM model
```
import numpy as np
import keras
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence serving (x, y) mini-batches from in-memory arrays."""

    def __init__(self, df_x, df_y, batch_size=128):
        """Store the data and pre-compute the row-index array."""
        self.df_x = df_x
        self.df_y = df_y
        self.batch_size = batch_size
        self.indexes = np.arange(len(self.df_x))

    def __len__(self):
        """Number of whole batches per epoch (any tail remainder is dropped)."""
        return int(np.floor(len(self.df_x) / self.batch_size))

    def __getitem__(self, index):
        """Return batch ``index`` as an (x, y) tuple."""
        start = index * self.batch_size
        batch_rows = self.indexes[start:start + self.batch_size]
        return (self.df_x[batch_rows], self.df_y[batch_rows])
```
### Class for feature engineering and cleaning the data sets
```
class feature_engineering:
    """Feature engineering and cleaning pipeline for well-log data frames.

    Individual steps are exposed as methods; ``done()`` runs the standard
    pipeline (outlier removal, log transform, above/below shifts, cleaning,
    lat/long -> xyz) and returns the processed frame.
    """
    def __init__(self,df, above_below_variables, num_shifts, cols_to_remove,thresh,
                 log_variables, y_variable, outlier_values, var1_ratio = 'gr'
                 ):
        # Working frame plus all pipeline parameters.
        self.original_df = df   # NOTE: same object as df, not a defensive copy
        self.df = df
        self.above_below_variables = above_below_variables
        self.y_variable = y_variable
        self.num_shifts = num_shifts
        self.cols_to_remove = cols_to_remove
        self.thresh = thresh
        self.log_variables = log_variables
        self.var1_ratio = var1_ratio
        self.outlier_values = outlier_values
        # Denominators for var_ratio(); intentionally shares log_variables.
        self.var2_ratio = log_variables
    def log_values(self):
        'Replace each column in log_variables with its natural log'
        for variable in self.log_variables:
            self.df[variable] = np.log(self.df[variable])
    def above_below(self):
        'Add, per well, the value above and below each listed variable'
        for var in self.above_below_variables:
            self.df[var+'_above'] = self.df.groupby('title')[var].shift(self.num_shifts)
            # BUG FIX: the value *below* requires a negative shift; the
            # original used the same positive shift for both columns, so
            # '_above' and '_below' were identical.
            self.df[var+'_below'] = self.df.groupby('title')[var].shift(-self.num_shifts)
        # Shifting introduces NaNs at each well's edges; drop those rows.
        drop = [self.above_below_variables[0]+'_above', self.above_below_variables[0]+'_below']
        for i in drop:
            self.df = self.df.dropna(subset=[i])
    def var_ratio(self):
        'Generate the ratio of var1_ratio (default gr) divided by each variable'
        for var in self.var2_ratio:
            self.df[self.var1_ratio + '_' + var] = (self.df[self.var1_ratio]/self.df[var])
            # Zero out +/-inf from division by zero.
            # NOTE(review): chained .loc assignment emits SettingWithCopyWarning
            # in modern pandas; consider df.loc[mask, col] = 0 instead.
            self.df[self.var1_ratio + '_' + var].loc[self.df[self.var1_ratio + '_' + var] == float('Inf')] = 0
            self.df[self.var1_ratio + '_' + var].loc[self.df[self.var1_ratio + '_' + var] == -float('Inf')] = 0
    def cleaning(self):
        'Remove columns, sparse rows and bad labels; make y_variable categorical'
        self.df = self.df.drop(self.cols_to_remove,axis = 1)
        # Keep rows with at least `thresh` non-NA values.
        self.df = self.df.dropna(thresh=self.thresh)
        self.df = self.df[np.isfinite(self.df['tvd'])]
        self.df = self.df[(self.df.formation != 'water depth')]
        self.df[self.y_variable] = self.df[self.y_variable].astype('category')
        # cat.codes == -1 marks NaN labels; drop them.
        self.df = self.df[self.df[self.y_variable].cat.codes != -1]
        self.df.reset_index(inplace=True, drop = True)
    def xyz(self):
        'Project lat/long onto the unit sphere (x, y, z) for ML purposes'
        # NOTE(review): np.cos/np.sin expect radians — confirm that 'lat'
        # and 'long' are stored in radians, not degrees.
        self.df['x'] = np.cos(self.df['lat']) * np.cos(self.df['long'])
        self.df['y'] = np.cos(self.df['lat']) * np.sin(self.df['long'])
        self.df['z'] = np.sin(self.df['lat'])
        self.df = self.df.drop(['lat','long'],axis = 1)
    def single_pt_haversine(self,degrees=True):
        """
        'Single-point' Haversine: Calculates the great circle distance
        between a point on Earth and the (0, 0) lat-long coordinate.

        NOTE(review): this method is broken and is not called by done():
        it reads self.df.lng (column is named 'long' elsewhere), recurses
        with a different arity, and references undefined names in the final
        list comprehension. Left as-is pending a decision on intent.
        """
        r = 6371 # Earth's radius (km). Have r = 3956 if you want miles
        # Convert decimal degrees to radians
        if degrees:
            lat, lng = map(math.radians, [self.df.lat, self.df.lng])
        # 'Single-point' Haversine formula
        a = math.sin(lat/2)**2 + math.cos(lat) * math.sin(lng/2)**2
        d = 2 * r * math.asin(math.sqrt(a))
        self.df['well_distance'] = [self.single_pt_haversine(x, y) for x, y in zip(lat, long)]
    def drop_new_values(self):
        'Drop the NaNs introduced by the above/below shift columns'
        drop = ["gr_above", "gr_below"]
        for i in drop:
            self.df = self.df.dropna(subset=[i])
        self.df = self.df
    def remove_outliers(self):
        'Clip each monitored column to [0, limit] by dropping rows outside'
        for key,value in self.outlier_values.items():
            self.df = self.df[self.df[key] <= value]
            self.df = self.df[self.df[key] >= 0]
    def done(self):
        'Run the full pipeline and return the processed data frame'
        # (The original called remove_outliers() twice; the second call was
        # a no-op since the rows were already filtered.)
        self.remove_outliers()
        self.log_values()
        self.above_below()
        self.cleaning()
        self.xyz()
        return self.df
```
### Visualizations
```
# Fixed hex palettes for the categorical plots below:
# one colour per formation class (38) and per group class (13).
formation_colors = ['#d96c6c', '#ffe680', '#336633','#4d5766', '#cc99c9',
                    '#733939', '#f2eeb6', '#739978', '#333366', '#cc669c',
                    '#f2b6b6', '#8a8c69', '#66ccb8', '#bfbfff', '#733950',
                    '#b27159', '#c3d96c', '#336663', '#69698c', '#33262b',
                    '#bfa38f', '#2d3326', '#1a3133', '#8f66cc', '#99737d',
                    '#736256', '#65b359', '#73cfe6', '#673973', '#f2ba79',
                    '#bef2b6', '#86aab3', '#554359', '#8c6c46', '#465943',
                    '#73b0e6', '#ff80f6', '#4c3b26']
group_colors = ['#ff4400', '#cc804e', '#e5b800', '#403300', '#4da63f',
                '#133328', '#00cad9', '#005fb3', '#0000f2', '#292259',
                '#d052d9', '#33131c', '#ff6176']
def plot_well_comparison(df, well_index, formation_colors, group_colors, model_name = None, save = False):
    """Plot predicted vs. actual formation and group labels for one well
    as four side-by-side colour strips.

    Expects columns 'predicted', 'formation_2', 'predicted_group',
    'group_2' and 'title' in df.
    NOTE(review): relies on globals n_formation, n_group and (when
    save=True) fig_dir being defined elsewhere in the notebook.
    """
    #df['group_2'] = df['group'].astype('category').cat.codes
    # Select all rows of the well at position well_index.
    logs = df.loc[df["title"] == df.title.unique()[well_index]]
    #logs = logs.sort_values(by='tvd')
    # Repeat each 1-D label column 100x horizontally to form a strip image.
    cluster_predicted_formation=np.repeat(np.expand_dims(logs['predicted'].values,1), 100, 1)
    cluster_actual_formation=np.repeat(np.expand_dims(logs['formation_2'].values,1), 100, 1)
    cluster_predicted_group=np.repeat(np.expand_dims(logs['predicted_group'].values,1), 100, 1)
    cluster_actual_group=np.repeat(np.expand_dims(logs['group_2'].values,1), 100, 1)
    cmap_formation = colors.ListedColormap(formation_colors)
    bounds_formation = [l for l in range(n_formation+1)]
    norm_formation = colors.BoundaryNorm(bounds_formation, cmap_formation.N)
    cmap_group = colors.ListedColormap(group_colors)
    bounds_group = [l for l in range(n_group+1)]
    norm_group = colors.BoundaryNorm(bounds_group, cmap_group.N)
    #ztop=logs.tvd.min(); zbot=logs.tvd.max()
    f, ax = plt.subplots(nrows=1, ncols=4, figsize=(8, 12))
    im1=ax[0].imshow(cluster_predicted_formation, interpolation='none', aspect='auto',
                     cmap=cmap_formation,vmin=0,vmax=37)#, norm = norm_formation)
    im2=ax[1].imshow(cluster_actual_formation, interpolation='none', aspect='auto',
                     cmap=cmap_formation,vmin=0,vmax=37)#, norm = norm_formation)
    im3=ax[2].imshow(cluster_predicted_group, interpolation='none', aspect='auto',
                     cmap=cmap_group,vmin=0,vmax=12)#, norm = norm_group)
    im4=ax[3].imshow(cluster_actual_group, interpolation='none', aspect='auto',
                     cmap=cmap_group,vmin=0,vmax=12)#, norm = norm_group)
    ax[0].set_xlabel('Predicted formations')
    ax[1].set_xlabel('Actual formations')
    ax[2].set_xlabel('Predicted groups')
    ax[3].set_xlabel('Actual groups')
    ax[0].set_yticklabels([])
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['title'], fontsize=14,y=0.91)
    if save:
        plt.savefig(fig_dir+'prediction_'+model_name+'_'+'well_'+str(well_index)+'.png')
    plt.show()
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as colors
def plot_well_logs(df, well_index, formation_colors, save = False):
    """Plot six log curves (gr, rdep, rmed, dt, nphi, rhob) against depth
    for one well, plus formation and group colour strips.

    NOTE(review): relies on globals group_dictionary, group_colors1 and
    (when save=True) fig_dir being defined elsewhere in the notebook.
    """
    df['group_2'] = df['group'].map(group_dictionary)
    logs = df.loc[df["title"] == df.title.unique()[well_index]]
    #logs = logs.sort_values(by='tvd')
    cmap_formation = colors.ListedColormap(formation_colors)
    cmap_group = colors.ListedColormap(group_colors1)
    ztop=logs.tvd.min(); zbot=logs.tvd.max()
    # Repeat each 1-D label column 100x horizontally to form a strip image.
    cluster=np.repeat(np.expand_dims(logs['formation_2'].values,1), 100, 1)
    cluster_2=np.repeat(np.expand_dims(logs['group_2'].values,1), 100, 1)
    f, ax = plt.subplots(nrows=1, ncols=8, figsize=(12, 16))
    ax[0].plot(logs.gr, logs.tvd, '-g')
    ax[1].plot(logs.rdep, logs.tvd, '-')
    ax[2].plot(logs.rmed, logs.tvd, '-', color='r')
    ax[3].plot(logs.dt, logs.tvd, '-', color='0.5')
    ax[4].plot(logs.nphi, logs.tvd, '-', color='y')
    ax[5].plot(logs.rhob, logs.tvd, '-', color='c')
    im1=ax[6].imshow(cluster, interpolation='none', aspect='auto',
                     cmap=cmap_formation,vmin=1,vmax=37)
    im2=ax[7].imshow(cluster_2, interpolation='none', aspect='auto',
                     cmap=cmap_group,vmin=1,vmax=12)
    for i in range(len(ax)-2):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
    ax[0].set_xlabel("gr")
    ax[0].set_xlim(logs.gr.min(),logs.gr.max()+10)
    ax[1].set_xlabel("rdep")
    ax[1].set_xlim(logs.rdep.min(),logs.rdep.max()+0.5)
    ax[2].set_xlabel("rmed")
    ax[2].set_xlim(logs.rmed.min(),logs.rmed.max()+0.5)
    ax[3].set_xlabel("dt")
    ax[3].set_xlim(logs.dt.min(),logs.dt.max()+0.5)
    ax[4].set_xlabel("nphi")
    # FIX: the nphi x-limits were applied to ax[5] (a copy-paste slip),
    # leaving the nphi panel with default limits; they belong on ax[4].
    ax[4].set_xlim(logs.nphi.min(),logs.nphi.max()+0.5)
    ax[5].set_xlabel("rhob")
    ax[5].set_xlim(logs.rhob.min(),logs.rhob.max()+0.5)
    ax[6].set_xlabel('Formations')
    ax[7].set_xlabel('Group')
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([]);ax[4].set_yticklabels([])
    ax[5].set_yticklabels([]); ax[6].set_yticklabels([]); ax[7].set_yticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['title'], fontsize=14,y=0.91)
    if save:
        plt.savefig(fig_dir+'well_'+str(well_index)+'.01.png')
    plt.show()
```
| github_jupyter |
```
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import numpy as np
import time
from prune import *
#TODO
# Training hyper-parameters.
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10  # print/record every 10 batches
random_seed = 1
# Disable cuDNN and fix the seed for reproducible runs.
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
# MNIST loaders, normalized with the dataset's mean/std (0.1307, 0.3081).
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_test, shuffle=True)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 MNIST digits (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Return per-class log-probabilities, shape (batch, 10)."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)  # flatten: 20 channels * 4 * 4
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # Fix: pass dim explicitly — the implicit-dim form is deprecated and
        # warned at runtime; for 2-D input the heuristic chose dim=1 anyway,
        # so the result is unchanged.
        return F.log_softmax(x, dim=1)
# Model, optimizer and the bookkeeping lists used by train()/test() below.
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                      momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
# One evaluation point per epoch boundary (including before training).
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch,locked_masks,network):
    """One training epoch interleaved with 5 iterative pruning steps.

    After each pruning step (except the first pass) the pruned weights are
    recorded in ``locked_masks`` and their gradients are zeroed every batch
    so they stay pruned. Uses the globals optimizer, train_loader,
    log_interval, train_losses, train_counter and the sibling test().
    NOTE(review): loop nesting reconstructed from a flattened notebook
    export — confirm the batch loop sits inside the prune loop.
    """
    #network=Net()
    network.train()
    for prune_step in range(5):
        if prune_step > 0:
            print('Start Pruning')
            prune(network,locked_masks, prune_random=False, prune_weight=True, prune_bias=False, ratio=0.5,
                  threshold=None, threshold_bias=None, function=None, function_bias=None, prune_across_layers=True)
            print('Done Pruning')
            #correct(test_loader,network)
            prune_diag(network,locked_masks)
            test(network)
        for batch_idx, (data, target) in enumerate(train_loader):
            optimizer.zero_grad()
            output = network(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            network = prune_grad(network,locked_masks) #zeros gradients of the pruned weights
            #for n, w in network.named_parameters():
            #    if w.grad is not None and n in locked_masks:
            #        w.grad[locked_masks[n]] = 0
            optimizer.step()
            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
                train_losses.append(loss.item())
                train_counter.append(
                    (batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
                # Checkpoint model and optimizer state each logging step.
                torch.save(network.state_dict(), './results/model.pth')
                torch.save(optimizer.state_dict(), './results/optimizer.pth')
    test(network)
def test(network):
    """Evaluate ``network`` on the global ``test_loader``.

    Accumulates the summed NLL loss, averages it over the test set,
    appends it to the global ``test_losses`` and prints loss/accuracy.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # Fix: reduction='sum' replaces the deprecated size_average=False
            # (identical behaviour: per-batch summed NLL).
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # Predicted class = argmax of the log-probabilities.
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# --- Run 1: prune-while-training the CNN defined above ---
# Initialize locked masks: one boolean mask per parameter tensor; True marks
# a weight that has been pruned and must stay at zero.
locked_masks = {n: torch.zeros(w.size(), dtype=torch.bool) for n, w in network.named_parameters()}
for n, w in network.named_parameters():
    print(n)
    print(w.size())
for epoch in range(1, n_epochs + 1):
    train(epoch,locked_masks,network)
#    test()
# --- Run 2: iterative pruning of a second model `net` ---
# NOTE(review): `net` is not defined in this cell; the flattening of the
# inputs below suggests a fully-connected model defined elsewhere.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9)
#initialize locked masks:
locked_masks = {n: torch.zeros(w.size(), dtype=torch.bool) for n, w in net.named_parameters()}
#start time
start=time.time()
for prune_step in range(5):
    if prune_step > 0:
        print('Start Pruning')
        prune(net,locked_masks, prune_random=False, prune_weight=True, prune_bias=False, ratio=0.75,
              threshold=None, threshold_bias=None, function=None, function_bias=None, prune_across_layers=True)
        print('Done Pruning')
        correct(test_loader,net)
        prune_diag(net,locked_masks)
        #print('prune diag time: ',time.time()-s2)
    for epoch in range(4): # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(train_loader,0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            inputs = inputs.view(inputs.shape[0], -1)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + prune_grad + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            net = prune_grad(net,locked_masks) #zeros gradients of the pruned weights
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 200 == 199:    # print every 200 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0
        correct(test_loader,net)
print('Finished Training')
print('time: ',time.time()-start)
# Inspect the first model's parameter shapes/values after training.
for n, w in network.named_parameters():
    #print(len(w.data.numpy().shape))
    print(len(w.shape))
    print(w)
```
| github_jupyter |
# Monte Carlo Localization
千葉工業大学 上田 隆一
(c) 2017 Ryuichi Ueda
This software is released under the MIT License, see LICENSE.
## はじめに
このコードは、移動ロボットの自己位置推定に使われるMonte Carlo Localization(MCL)のサンプルです。
## コードの流れ
確率的な考え方で自分の位置を推定するロボットは、自身の姿勢(位置と向き)について確証は持たず、常に自身の姿勢について曖昧な表現をします。 MCLでは、ロボットが自身の姿勢を複数の候補点で表現します。ロボットは複数の候補点のうちのどれか、あるいは候補点が分布している範囲のどこかに自身が存在するという考え(自我)を持つことになります。
候補点は、空間に浮かぶ粒という意味で「パーティクル」(粒子)と呼ばれたり、ロボットが居そうなところから候補点を一つ選んだという意味で「サンプル」(標本)と呼ばれたりします。
パーティクルの分布は、ロボットが移動や観測を行うごとに、ロボットの姿勢をよりよく推定できるように更新されます。通常、ロボットが移動するとパーティクルの分布は広がり、観測すると狭くなります。
## ヘッダ
ヘッダです。計算に対してはnumpy以外、特に変わったものは使いません。
```
%matplotlib inline
import numpy as np
from copy import copy
import math, random
import matplotlib.pyplot as plt # for plotting data
from matplotlib.patches import Ellipse # for drawing
```
## 二次元のガウス分布を表現するクラス
尤度の計算と描画に使用します。
```
class Gaussian2D:
    """Two-dimensional Gaussian defined by a covariance matrix and a mean.

    Used for the sensor's likelihood computation and for drawing the
    error ellipse.
    """

    def __init__(self, sigma_x=1.0, sigma_y=1.0, cov_xy=0.0, mu_x=0.0, mu_y=0.0):
        """Build the covariance from standard deviations and a cross term."""
        self.cov = np.array([[sigma_x ** 2, cov_xy], [cov_xy, sigma_y ** 2]])
        self.mean = np.array([mu_x, mu_y]).T

    def shift(self, delta, angle):
        """Translate the mean by ``delta`` and rotate the covariance by ``angle``."""
        c, s = math.cos(angle), math.sin(angle)
        rotation = np.array([[c, s], [-s, c]])
        self.cov = rotation.dot(self.cov).dot(rotation.T)
        self.mean = self.mean + delta

    def value(self, pos):
        """Probability density of the distribution evaluated at ``pos``."""
        offset = pos - self.mean
        mahalanobis_sq = (offset.T).dot(np.linalg.inv(self.cov)).dot(offset)
        normalizer = 2 * math.pi * math.sqrt(np.linalg.det(self.cov))
        return math.exp(-0.5 * mahalanobis_sq) / normalizer
```
## ランドマークの管理クラス
配列でもいいのですが、描画の関係で一つのクラスで複数のランドマークの位置を管理するという方式にしています。ランドマークの位置は、世界座標系のx,y座標で表されます。このランドマークをロボットが見ると、どのランドマークか(ランドマークのID)、ロボットからの距離と見える方角の3つの値が雑音付きで得られると仮定します。ランドマークのIDは、self.positionsのリストでの位置(0,1,2,...)とします。
```
class Landmarks:
    """Holds the world-frame (x, y) positions of all landmarks.

    A single object manages every landmark (rather than a bare list) so
    that they can be drawn together; a landmark's ID is its index in
    ``positions``.
    """

    def __init__(self, array):
        self.positions = array

    def draw(self):
        """Scatter every landmark as an orange star on the current axes."""
        xs = [point[0] for point in self.positions]
        ys = [point[1] for point in self.positions]
        plt.scatter(xs, ys, s=300, marker="*", label="landmarks", color="orange")
```
## 1回のランドマーク計測を管理するクラス
ロボットの真の姿勢とランドマークの位置情報をコンストラクタで受けて、センサの返す値をシミュレートします。
```
class Observation:
    """One simulated range-bearing measurement of a single landmark.

    Built from the robot's TRUE pose (simulation-only knowledge) and a
    landmark's true position; stores a noisy (distance, direction)
    reading plus the sensor's error model as a 2-D Gaussian.
    """
    def __init__(self,robot_pos, landmark,lid):
        # Sensor field of view: 0.1-1.0 range, +/- 90 deg.
        self.sensor_max_range = 1.0
        self.sensor_min_range = 0.1
        self.sensor_max_angle = math.pi / 2
        self.sensor_min_angle = - math.pi /2
        # Landmark ID; stays None when the landmark is outside the sensor's range.
        self.lid = None
        # The robot's true pose is used only to simulate the sensor; the
        # landmark's true position is assumed known to the robot (its map),
        # so it is kept as an attribute.
        rx,ry,rt = robot_pos
        self.true_lx,self.true_ly = landmark
        # True distance from robot to landmark.
        distance = math.sqrt((rx-self.true_lx)**2 + (ry-self.true_ly)**2)
        if distance > self.sensor_max_range or distance < self.sensor_min_range:
            return
        # True bearing of the landmark in the robot frame, wrapped to [-pi, pi].
        direction = math.atan2(self.true_ly-ry, self.true_lx-rx) - rt
        if direction > math.pi: direction -= 2*math.pi
        if direction < -math.pi: direction += 2*math.pi
        if direction > self.sensor_max_angle or direction < self.sensor_min_angle:
            return
        # Noise model: 10% of range on distance, 3 deg std on bearing.
        sigma_distance = distance * 0.1
        sigma_direction = math.pi * 3 / 180
        # The sensor reading = truth + Gaussian noise.
        self.distance = random.gauss(distance, sigma_distance)
        self.direction = random.gauss(direction, sigma_direction)
        # Covariance in the robot frame, used later for the likelihood:
        # x = depth (sigma_distance), y = lateral error (range * sin(3 deg)).
        self.error_ellipse = Gaussian2D(sigma_x = sigma_distance, sigma_y = self.distance * math.sin(sigma_direction) , cov_xy = 0.0)
        self.lid = lid
    # Likelihood of a particle pose given this measurement (slow implementation).
    # Projects the measurement from the particle's pose to get an implied
    # landmark position, places the error ellipse there, and evaluates the
    # Gaussian density at the landmark's TRUE position. Kept this way so the
    # ellipse can also be drawn; a simpler route would compare the expected
    # vs. measured range/bearing with two independent 1-D Gaussians and
    # multiply the two densities.
    def likelihood(self,particle_pos):
        # Landmark position implied by the particle pose + the stored reading.
        rx, ry, rt = particle_pos
        proposed_lx = rx + self.distance * math.cos(rt + self.direction)
        proposed_ly = ry + self.distance * math.sin(rt + self.direction)
        # Move/rotate a copy of the error ellipse to that implied position.
        e = copy(self.error_ellipse)
        e.shift(np.array([proposed_lx, proposed_ly]).T, rt + self.direction)
        # Density of the shifted Gaussian at the true landmark position.
        return e.value(np.array([self.true_lx,self.true_ly]).T)
    # Drawing helper: build a matplotlib Ellipse for the error ellipse as
    # seen from robot_pos.
    def ellipse(self,robot_pos):
        rx, ry, rt = robot_pos[0], robot_pos[1], robot_pos[2]
        proposed_lx = rx + self.distance * math.cos(rt + self.direction)
        proposed_ly = ry + self.distance * math.sin(rt + self.direction)
        e = copy(self.error_ellipse)
        e.shift(np.array([proposed_lx, proposed_ly]).T, rt + self.direction)
        # Axis lengths from the eigenvalues, orientation from an eigenvector.
        # NOTE(review): eigen[1][0] indexes a ROW of the eigenvector matrix;
        # eigenvectors are its columns (eigen[1][:,0]) — verify.
        eigen = np.linalg.eig(e.cov)
        v1 = eigen[0][0] * eigen[1][0]
        v2 = eigen[0][1] * eigen[1][1]
        v1_direction = math.atan2(v1[1],v1[0])
        elli = Ellipse([proposed_lx, proposed_ly],width=math.sqrt(np.linalg.norm(v1)),height=math.sqrt(np.linalg.norm(v2)),angle=v1_direction/3.14*180)
        elli.set_alpha(0.2)
        return elli
    # Drawing helper: add the ellipse artist to subplot sp.
    def draw(self,sp,robot_pos):
        sp.add_artist(self.ellipse(robot_pos))
```
### ランドマークを3つ環境に置く
```
# Place three landmarks in the environment and draw them.
actual_landmarks = Landmarks(np.array([[-0.5,0.0],[0.5,0.0],[0.0,0.5]]))
actual_landmarks.draw()
```
### パーティクルのクラスとパーティクルフィルタのクラス
```
# A single particle: just a pose bundled with a weight (a plain struct).
class Particle:
    def __init__(self, x, y, t, w):
        """Store the pose as a numpy vector [x, y, theta] plus weight w."""
        self.pos = np.array([x, y, t])
        self.w = w
# Particle filter: owns the particle set and implements move / resample / draw.
class ParticleFilter:
    """Particle filter for Monte Carlo Localization."""

    def __init__(self, num):
        # All particles start at the origin with equal weight 1/num;
        # the weights of the whole set always sum to 1.
        self.particles = []
        for i in range(num):
            self.particles.append(Particle(0.0, 0.0, 0.0, 1.0/num))

    def moveParticles(self, fw, rot, motion):
        """Propagate every particle through the robot's noisy motion model.

        ``motion`` maps (pose, fw, rot) to a new pose; it is the robot's
        knowledge of its own kinematics.
        """
        self.resampling()  # resample before the motion update
        for p in self.particles:
            after = motion(p.pos, fw, rot)
            p.pos = after

    def resampling(self):
        """Resample the particle set in proportion to the weights.

        Prevents the weight from concentrating on a few particles; this
        step only exists in the sampled approximation, not in the exact
        Bayes-filter equations.
        """
        num = len(self.particles)
        ws = [e.w for e in self.particles]
        # Fix: removed a stray debug print of sum(ws) that spammed stdout
        # on every motion update.
        if sum(ws) < 1e-100:
            # Guard against all weights underflowing to zero, which would
            # make sampling impossible.
            ws = [e + 1e-100 for e in ws]
        # Draw num particles with probability proportional to the weights.
        ps = random.choices(self.particles, weights=ws, k=num)
        # Rebuild the set with uniform weights 1/num at the sampled poses.
        self.particles = [Particle(*e.pos, 1.0/num) for e in ps]

    def draw(self, c="blue", lbl="particles"):
        """Draw each particle as an arrow showing its pose."""
        xs = [p.pos[0] for p in self.particles]
        ys = [p.pos[1] for p in self.particles]
        vxs = [math.cos(p.pos[2]) for p in self.particles]
        vys = [math.sin(p.pos[2]) for p in self.particles]
        plt.quiver(xs, ys, vxs, vys, color=c, label=lbl, alpha=0.7)
```
### ロボットを表現するクラス
ロボットはランドマークを観測して1ステップ進んで・・・を繰り返します。
```
class Robot:
    """Simulated robot: records its true pose and runs an onboard MCL filter."""
    def __init__(self,x,y,rad):
        random.seed()
        # actual_poses: the true pose at every step (simulation only —
        # the robot itself never reads this for estimation).
        self.actual_poses = [np.array([x,y,rad])]
        # Particle filter with 30 particles.
        self.pf = ParticleFilter(30)
    # Noisy motion model. Used both to simulate the real robot and to move
    # the particles, i.e. this is also the robot's knowledge of its own
    # kinematics (and what a real deployment would need; noise levels are
    # measured beforehand or set roughly, relying on filter robustness).
    def motion(self, pos, fw, rot):
        # Advance by fw, then rotate by rot, with noise mixed in:
        # 10% std on the travelled distance...
        actual_fw = random.gauss(fw,fw/10)
        # ...and a 3 deg std wobble on the heading while moving.
        dir_error = random.gauss(0.0, math.pi / 180.0 * 3.0)
        px, py, pt = pos
        # Position after the (noisy) forward motion.
        x = px + actual_fw * math.cos(pt + dir_error)
        y = py + actual_fw * math.sin(pt + dir_error)
        # Rotation with 10% std noise, plus the heading wobble.
        actual_rot = random.gauss(rot,rot/10)
        t = pt + dir_error + actual_rot
        return np.array([x,y,t])
    # Advance one step: update the true pose and propagate the particles.
    def move(self,fw,rot):
        self.actual_poses.append(self.motion(self.actual_poses[-1],fw,rot))
        self.pf.moveParticles(fw,rot,self.motion)
    # Observe every landmark and weight the particles by the likelihoods.
    def observation(self,landmarks):
        obss = []
        for i,landmark in enumerate(landmarks.positions):
            obss.append(Observation(self.actual_poses[-1],landmark,i))
        # Drop landmarks that were outside the sensor's field of view.
        obss = list(filter(lambda e : e.lid != None, obss))
        # Multiply each particle's weight by each observation's likelihood.
        for obs in obss:
            for p in self.pf.particles:
                p.w *= obs.likelihood(p.pos)
        # Returned only for drawing.
        return obss
    # Drawing helper: error ellipses, particles and the true trajectory.
    def draw(self,sp,observations):
        for obs in observations:
            for p in self.pf.particles:
                obs.draw(sp,p.pos)
        self.pf.draw()
        xs = [e[0] for e in self.actual_poses]
        ys = [e[1] for e in self.actual_poses]
        vxs = [math.cos(e[2]) for e in self.actual_poses]
        vys = [math.sin(e[2]) for e in self.actual_poses]
        plt.quiver(xs,ys,vxs,vys,color="red",label="actual robot motion")
```
## 描画用の関数
説明は割愛。
```
def draw(i, observations):
    """Render one simulation frame: particles, observations, true poses and landmarks."""
    figure = plt.figure(i, figsize=(8, 8))
    axes = figure.add_subplot(111, aspect='equal')
    axes.set_xlim(-1.0, 1.0)
    axes.set_ylim(-0.5, 1.5)
    robot.draw(axes, observations)
    actual_landmarks.draw()
    plt.legend()
```
## シミュレーションの実行
図の説明:
* 赤の矢印: 真の姿勢
* 星: ランドマークの位置
* 青の矢印: パーティクルの姿勢
* 楕円: ランドマークの観測値と各パーティクルの姿勢からランドマークの位置を計算したものと、その位置の曖昧さを表す共分散行列
```
robot = Robot(0,0,0) # ロボットを原点に
# 観測、描画、移動の繰り返し
for i in range(0,18):
obss = robot.observation(actual_landmarks)
draw(i,obss)
robot.move(0.2,math.pi / 180.0 * 20)
```
| github_jupyter |
# DSA simulations
```
% matplotlib inline
%config InlineBackend.figure_format = 'retina'
%load_ext line_profiler
%load_ext autoreload
%autoreload 2
from __future__ import division
import numpy as np
import glob, os
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi'] = 2.5 * matplotlib.rcParams['figure.dpi']
import astropy
from astropy.time import Time
import enterprise
from enterprise.pulsar import Pulsar
import enterprise_extensions
from enterprise_extensions import models, model_utils
import libstempo as T2, libstempo.toasim as LT, libstempo.plot as LP
from ephem import Ecliptic, Equatorial
datadir = '../partim_no_noise/'
def figsize(scale):
    """Return [width, height] in inches for a figure spanning `scale` of the LaTeX text width."""
    latex_textwidth_pt = 513.17  # from LaTeX via \the\textwidth (previously 469.755)
    pt_to_inch = 1.0 / 72.27
    golden_ratio = (np.sqrt(5.0) - 1.0) / 2.0  # aesthetic height/width ratio
    width = latex_textwidth_pt * pt_to_inch * scale
    return [width, width * golden_ratio]
#plt.rcParams.update(plt.rcParamsDefault)
params = {'backend': 'pdf',
'axes.labelsize': 10,
'lines.markersize': 4,
'font.size': 10,
'xtick.major.size':6,
'xtick.minor.size':3,
'ytick.major.size':6,
'ytick.minor.size':3,
'xtick.major.width':0.5,
'ytick.major.width':0.5,
'xtick.minor.width':0.5,
'ytick.minor.width':0.5,
'lines.markeredgewidth':1,
'axes.linewidth':1.2,
'legend.fontsize': 7,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'savefig.dpi':200,
'path.simplify':True,
'font.family': 'serif',
'font.serif':'Times',
'text.latex.preamble': [r'\usepackage{amsmath}',r'\usepackage{amsbsy}',
r'\DeclareMathAlphabet{\mathcal}{OMS}{cmsy}{m}{n}'],
'text.usetex':True,
'figure.figsize': figsize(0.5)}
plt.rcParams.update(params)
```
## Useful functions
```
import pandas as pd
# Get (equatorial) position of pulsars from name
def pos_from_name(name):
    """Convert pulsar names like 'J1234+5678' into equatorial (ra, dec) pairs.

    Parameters
    ----------
    name : iterable of str
        Pulsar names of the form [BJ]HHMM+DDMM or [BJ]HHMM-DDMM.

    Returns
    -------
    np.ndarray, shape (len(name), 2)
        Equatorial (ra, dec) in radians, as returned by ephem.Equatorial.
    """
    pos = []
    for p in name:
        # The declination sign doubles as the separator between the RA part
        # (hours+minutes) and the DEC part (degrees+minutes).
        sign = '+' if '+' in p else '-'
        tmp = p.split(sign)
        raj = '{0}:{1}:00.00'.format(tmp[0][1:3], tmp[0][3:])
        decj = '{0}{1}:{2}:00.00'.format(sign, tmp[1][:2], tmp[1][2:])
        eq = Equatorial(raj, decj)
        # NOTE(review): the original also built an Ecliptic(eq, epoch)
        # (epoch 1950 for B names, 2000 for J names) but never used it;
        # that dead computation has been removed — the returned coordinates
        # are equatorial, as the function name says.
        pos.append([float(eq.ra), float(eq.dec)])
    return np.array(pos)
# Get name of pulsar from (equatorial) position
def name_from_pos(pos):
    """Build 'JHHMM±DDMM'-style pulsar names from (ra, dec) pairs in radians."""
    name = []
    for p in pos:
        eq = Equatorial(p[0], p[1])
        ra = str(eq.ra)    # 'H:M:S' string
        dec = str(eq.dec)  # 'D:M:S' string
        # Zero-pad the RA hours and keep only hours+minutes.
        if float(ra.split(':')[0]) < 10:
            ra = '0' + ''.join(ra.split(':')[:2])
        else:
            ra = ''.join(ra.split(':')[:2])
        # Zero-pad the declination degrees.
        # NOTE(review): this branch prepends '-0' whenever |deg| < 10, which
        # looks wrong for *positive* declinations below 10 degrees — confirm
        # that such positions never occur in this dataset.
        if np.abs(float(dec.split(':')[0])) < 10:
            dec = ':'.join(['-0'+dec.split(':')[0][1:],dec.split(':')[1]])
        else:
            dec = ':'.join(dec.split(':')[:2])
        # Join degrees and minutes, adding an explicit '+' for positive decs.
        if float(dec.split(':')[0]) < 0:
            dec = ''.join(dec.split(':')[:2])
        elif float(dec.split(':')[0]) > 0:
            dec = '+' + ''.join(dec.split(':')[:2])
        else:
            dec = ''.join(dec.split(':')[:2])
        # Final name: 'J' + HHMM + signed DDMM.
        name.append('J' + ra + dec)
    return np.array(name)
# Draw new random positions based on a sample
def invtran_sample(pos, size):
    """Draw `size` new (x, y) positions by inverse-transform sampling a
    2-D histogram of the observed positions `pos` (array of shape (n, 2))."""
    x, y = pos[:,0], pos[:,1]
    hist, xbin, ybin = np.histogram2d(x, y, bins=(10, 10))
    # Bin centres.
    xbinc = xbin[:-1] + np.diff(xbin)/2.0
    ybinc = ybin[:-1] + np.diff(ybin)/2.0
    # Empirical CDF over the flattened 2-D histogram.
    cdf = np.cumsum(hist.ravel())
    cdf = cdf / cdf[-1]
    # Invert the CDF at uniform random draws to select histogram bins.
    values = np.random.rand(size)
    value_bins = np.searchsorted(cdf, values)
    x_idx, y_idx = np.unravel_index(value_bins,
                                    (len(xbinc),
                                     len(ybinc)))
    delta_x = np.diff(xbin)[0]
    delta_y = np.diff(ybin)[0]
    # Jitter uniformly within the selected bin.
    # NOTE(review): for size > 1 a single scalar jitter is shared by all
    # samples (np.random.uniform is called without a size argument) —
    # confirm this is intended rather than per-sample jitter.
    if size == 1:
        random_from_cdf = [xbinc[x_idx][0] + np.random.uniform(-delta_x/2.0, delta_x/2.0),
                           ybinc[y_idx][0] + np.random.uniform(-delta_y/2.0, delta_y/2.0)]
    else:
        random_from_cdf = np.column_stack((xbinc[x_idx] + np.random.uniform(-delta_x/2.0, delta_x/2.0),
                                           ybinc[y_idx] + np.random.uniform(-delta_y/2.0, delta_y/2.0)))
    return random_from_cdf
def year2mjd(year):
    """MJD of January 1st of the nearest integer year (rounds to nearest year)."""
    rounded = int(np.rint(year))
    isot = "{}-01-01T00:00:00".format(rounded)
    return float(Time(isot, format='isot').mjd)
def mjd2year(mjd):
    """Decimal year corresponding to an MJD epoch."""
    t = Time(mjd, format='mjd')
    return float(t.decimalyear)
```
## Process data
```
data = pd.read_csv('../data/v2/RMSonlyvsTime2018-DSA2000-LoseAO.csv',header=0,skip_blank_lines=True,)
# Correcting for mistake in spreadsheet
for ii,name in data.iterrows():
print name
#if name.Observatory == 'GBT' and name.PSR == 'NEWPSR' and name.Epoch2 >= 2025:
# data.iloc[ii,6] = name.RMS3
# data.iloc[ii,7] = np.nan
# data.iloc[ii,8] = np.nan
# names of all real gbt and ao pulsars
real_pulsars_gbt = np.array([name.PSR for ii,name in data.iterrows() if 'NEWPSR' not in name.PSR and name.Observatory=='GBT'])
real_pulsars_ao = np.array([name.PSR for ii,name in data.iterrows() if 'NEWPSR' not in name.PSR and name.Observatory=='AO'])
# names of all fake gbt and ao pulsars (these are all NEWPSR)
fake_pulsars_gbt = np.array([name.PSR for ii,name in data.iterrows() if 'NEWPSR' in name.PSR and name.Observatory=='GBT'])
fake_pulsars_ao = np.array([name.PSR for ii,name in data.iterrows() if 'NEWPSR' in name.PSR and name.Observatory=='AO'])
# Get equatorial positions (ra, dec...in radians) from names
pos_pulsars_gbt = pos_from_name(real_pulsars_gbt)
pos_pulsars_ao = pos_from_name(real_pulsars_ao)
# Generate new fake pulsar positions
fakepos_pulsars_gbt = np.array([invtran_sample(pos_pulsars_gbt, size=1) for ii in range(len(fake_pulsars_gbt))])
fakepos_pulsars_ao = np.array([invtran_sample(pos_pulsars_ao, size=1) for ii in range(len(fake_pulsars_ao))])
# Generate new fake pulsar names
fake_pulsars_gbt = name_from_pos(fakepos_pulsars_gbt)
fake_pulsars_ao = name_from_pos(fakepos_pulsars_ao)
# Make copy of data frame and replace NEWPSR with new names
data_copy = data.copy(deep=True)
ct_gbt = 0
ct_ao = 0
for ii,name in data_copy.iterrows():
# New GBT pulsars
if name.Observatory == 'GBT' and name.PSR=='NEWPSR':
#tmp_pos = invtran_sample(pos_pulsars_gbt, size=1)
#tmp_name = name_from_pos(tmp_pos)
#data_copy.iloc[ii,0] = tmp_name[0]
data_copy.iloc[ii,0] = fake_pulsars_gbt[ct_gbt]
ct_gbt += 1
# New AO pulsars
if name.Observatory == 'AO' and name.PSR=='NEWPSR':
#tmp_pos = invtran_sample(pos_pulsars_ao, size=1)
#tmp_name = name_from_pos(tmp_pos)
#data_copy.iloc[ii,0] = tmp_name[0]
data_copy.iloc[ii,0] = fake_pulsars_ao[ct_ao]
ct_ao += 1
# Create new data columns for RAJ and DECJ
data_copy = data_copy.assign(RAJ=pd.Series(np.concatenate([pos_pulsars_gbt[:,0],
fakepos_pulsars_gbt[:,0],
pos_pulsars_ao[:,0],
fakepos_pulsars_ao[:,0]])).values)
data_copy = data_copy.assign(DECJ=pd.Series(np.concatenate([pos_pulsars_gbt[:,1],
fakepos_pulsars_gbt[:,1],
pos_pulsars_ao[:,1],
fakepos_pulsars_ao[:,1]])).values)
```
### Dealing with Burning Dumpster sim
```
data = pd.read_csv('../data/v2/RMSonlyvsTime2018-DSA2000-BurningDumpster.csv',header=0,skip_blank_lines=True,)
data_copy = data.copy(deep=True)
final_data = pd.read_csv('../data/v2/status_quo.csv')
data_copy.PSR = final_data.PSR
data_copy = data_copy.assign(RAJ=final_data.RAJ)
data_copy = data_copy.assign(DECJ=final_data.DECJ)
# Write to csv
data_copy.to_csv('../data/v2/burning_dumpster.csv')
```
## Creating par files
```
sim_type = 'status_quo' # 'status_quo', 'lose_ao', or 'burning_dumpster'
# Read from csv
data_copy = pd.read_csv('../data/v2/{}.csv'.format(sim_type))
for ii,name in data_copy.iterrows():
# Get coordinates for par file
tmp = Equatorial(data_copy.iloc[ii].RAJ, data_copy.iloc[ii].DECJ, epoch='2000')
# Set PEPOCHs to be 5 years in
rms = np.array([name.RMS1, name.RMS2, name.RMS3,
name.RMS4, name.RMS5])
rms[rms=='gap'] = np.inf
rms = np.array(rms,dtype=float)
epoch = np.array([name.Epoch1, name.Epoch2, name.Epoch3,
name.Epoch4, name.Epoch5])
start = epoch[np.where(~np.isnan(rms))[0][0]]
with open('../data/template.par', 'r') as fil:
pardata = fil.read()
with open('../data/v2/par/' + name.PSR + '.par', 'w') as filnew:
for line in pardata.split('\n'):
if 'PSR' in line:
print >>filnew, '\t\t'.join([line.split()[0], name.PSR])
elif 'RAJ' in line:
print >>filnew, '\t\t'.join([line.split()[0], str(tmp.ra),
line.split()[2], line.split()[3]])
elif 'DECJ' in line:
print >>filnew, '\t\t'.join([line.split()[0], str(tmp.dec),
line.split()[2], line.split()[3]])
elif 'PEPOCH' in line or 'POSEPOCH' in line or 'DMEPOCH' in line:
print >>filnew, '\t\t'.join([line.split()[0], str(int(year2mjd(start+5)))])
else:
print >>filnew, line
print name.PSR, start
```
## Creating tim files
```
sim_type = 'burning_dumpster' # 'status_quo', 'lose_ao', or 'burning_dumptser'
# Read from csv
data_copy = pd.read_csv('../data/v2/{}.csv'.format(sim_type))
dsa_sims = []
start_data = []
for jj,name in data_copy.iterrows():
psrname = name.PSR
rms = np.array([name.RMS1, name.RMS2, name.RMS3,
name.RMS4, name.RMS5])
rms[rms=='gap'] = np.inf
rms = np.array(rms,dtype=float)
epoch = np.array([name.Epoch1, name.Epoch2, name.Epoch3,
name.Epoch4, name.Epoch5])
### Start and End year
start_yr = epoch[np.where(~np.isnan(rms))[0][0]]
start_yr_mjd = year2mjd(start_yr)
#
end_yr = 2045
end_yr_mjd = year2mjd(end_yr)
### Spacing and obstimes
spacing = 365.25 / 20.0 # days between observations
#
obstimes = np.arange(start_yr_mjd, end_yr_mjd, spacing)
# removing data gaps
for kk,rmss in enumerate(rms):
if np.isinf(rmss):
mask = np.logical_and(obstimes >= year2mjd(epoch[kk]),
obstimes <= year2mjd(epoch[kk+1]))
obstimes = obstimes[~mask]
### Segmenting obstimes based on hardware/telescope switches
stops = list(epoch[np.where(~np.isnan(rms))[0]]) + [end_yr]
stops = [year2mjd(yr) for yr in stops]
errors = list(rms[np.where(~np.isnan(rms))[0]])
### Masking sections of data based on these stops
masks = []
for kk,stop in enumerate(stops):
if kk < len(stops)-1:
masks.append(np.logical_and(obstimes >= stops[kk],
obstimes <= stops[kk+1]))
### Applying RMS errors
toa_errs = np.ones_like(obstimes)
for kk,mask in enumerate(masks):
toa_errs[mask] *= float(errors[kk])
### Make fake dataset
par = '../data/v2/par/' + psrname + '.par'
dsa_sims.append(LT.fakepulsar(parfile=par, obstimes=obstimes,
toaerr=toa_errs,
observatory=name.Observatory.lower()))
# white noise
LT.add_efac(dsa_sims[jj])
# save .tim
dsa_sims[jj].savetim('../data/v2/tim_{}/'.format(sim_type)
+ dsa_sims[jj].name + '.tim')
###
start_data.append([psrname, start_yr, start_yr_mjd])
print psrname, par, start_yr_mjd, end_yr_mjd, len(stops), len(masks), len(errors)
start_data = np.array(start_data)
start_data[start_data[:,1].argsort()]
fil = open('sims_psr_startdata_{}.txt'.format(sim_type),'w')
for line in start_data[start_data[:,1].argsort()]:
print >>fil, line[0], line[1], line[2]
fil.close()
```
# Read In And Check Pulsars
```
import enterprise
from enterprise.pulsar import Pulsar
from enterprise.signals import parameter
from enterprise.signals import white_signals
from enterprise.signals import gp_signals
from enterprise.signals import signal_base
import enterprise_extensions
from enterprise_extensions import models, model_utils
psr1 = Pulsar('../data/v2/par/J2234+0944.par', '../data/v2/tim_status_quo/J2234+0944.tim', ephem='DE436')
psr2 = Pulsar('../data/v2/par/J2234+0944.par', '../data/v2/tim_lose_ao/J2234+0944.tim', ephem='DE436')
psr3 = Pulsar('../data/v2/par/J2234+0944.par', '../data/v2/tim_burning_dumpster/J2234+0944.tim', ephem='DE436')
plt.errorbar([mjd2year(p) for p in psr1.toas/86400.0],
psr1.residuals/1e-6, psr1.toaerrs/1e-6,
alpha=0.3, fmt='.')
#plt.errorbar([mjd2year(p) for p in psr2.toas/86400.0],
# psr2.residuals/1e-6, psr2.toaerrs/1e-6,
# alpha=0.3, fmt='.')
plt.errorbar([mjd2year(p) for p in psr3.toas/86400.0],
psr3.residuals/1e-6, psr3.toaerrs/1e-6,
alpha=0.3, fmt='.')
plt.xlabel(r'Year')
plt.ylabel(r'Residuals [$\mu$s]')
plt.title(psr1.name)
```
| github_jupyter |
# Visualizing time-resolved LFP-Spiking Analysis of CRCNS PFC2 Dataset
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('/Users/rdgao/Documents/code/research/spectralCV/')
sys.path.append('/Users/rdgao/Documents/code/research/neurodsp/')
%matplotlib inline
# imports
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import neurodsp as ndsp
from scv_funcs import lfpca
from scv_funcs import utils
import pandas as pd
import pandas.plotting as pdplt
CKEYS = plt.rcParams['axes.prop_cycle'].by_key()['color']
font = {'family' : 'arial',
'weight' : 'regular',
'size' : 13}
import matplotlib
matplotlib.rc('font', **font)
def compute_pairwise_corr(spkstats, lfpstats, sub_inds, corr_type='spearman', log_power=True,
                          plot_matrix=False, labels=('Cell_FR', 'Cell_ISICV', 'LFP_PWR', 'LFP_SCV', 'LFP_KS')):
    """Pairwise correlations between spiking and LFP features, per frequency band.

    Parameters
    ----------
    spkstats : array, shape (n_windows, >=3)
        Per-window spike statistics; column 0 is firing rate, column 2 is ISI CV.
    lfpstats : array, shape (n_windows, n_freqs, 3)
        Per-window LFP statistics: [:, :, 0] power, [:, :, 1] SCV, [:, :, 2] KS stat.
    sub_inds : index array
        Window indices to include.
    corr_type : str
        Correlation method passed to pandas (e.g. 'spearman', 'pearson').
    log_power : bool
        If True (default), correlate log10 of LFP power instead of raw power.
    plot_matrix : bool
        If True, draw an annotated scatter matrix for each frequency band.
    labels : sequence of 5 str
        Column labels for the feature DataFrame (previously read from a global).

    Returns
    -------
    rho : array, shape (5, 5, n_freqs)
        Correlation matrix between the five features for every frequency band.
    """
    fr, isi_cv = spkstats[sub_inds, 0], spkstats[sub_inds, 2]
    # BUG FIX: the log_power branches were swapped (log10 was applied when
    # log_power was False).  The default is now True so the default call —
    # correlating log power — behaves exactly as before.
    if log_power:
        pwr = np.log10(lfpstats[sub_inds, :, 0])
    else:
        pwr = lfpstats[sub_inds, :, 0]
    scv = lfpstats[sub_inds, :, 1]
    ks_stat = lfpstats[sub_inds, :, 2]
    num_freqs = pwr.shape[1]
    rho = np.zeros((5, 5, num_freqs))
    for freq in range(num_freqs):
        df = pd.DataFrame(np.array([fr, isi_cv, pwr[:, freq], scv[:, freq], ks_stat[:, freq]]).T,
                          columns=list(labels))
        # corr_type was previously ignored ('spearman' was hard-coded), and
        # .values replaces DataFrame.as_matrix(), removed in pandas 1.0.
        corr = df.corr(method=corr_type).values
        rho[:, :, freq] = corr
        if plot_matrix:
            axes = pdplt.scatter_matrix(df, alpha=0.2, figsize=(7, 7), diagonal='kde')
            for i, j in zip(*plt.np.triu_indices_from(axes, k=1)):
                # Annotate upper-triangle panels with the correlation coefficient.
                axes[i, j].annotate("%.3f" % corr[i, j], (0.8, 0.8), xycoords='axes fraction',
                                    ha='center', va='center')
    return rho
def plot_corrs(corr_mat, f1, f2, labels, freq=None, YL=(-0.5, 0.5)):
    """Plot the correlation between features f1 and f2 across channels.

    corr_mat has shape (feature, feature, freq, channel); all frequencies
    are plotted when `freq` is None.
    """
    selected = corr_mat[f1, f2, :, :] if freq is None else corr_mat[f1, f2, freq, :]
    plt.plot(selected.T)
    num_chan = corr_mat.shape[-1]
    # Channel index separating PFC (first two thirds) from CA1 (last third).
    re_div = num_chan / 3. * 2. - 0.5
    plt.xticks([re_div], ["PFC | CA1"])
    plt.ylim(YL)
    plt.plot([re_div, re_div], plt.ylim(), 'k--', alpha=0.5)
    plt.plot(plt.xlim(), [0, 0], 'r--', alpha=0.5)
    plt.title(labels[f1] + ' : ' + labels[f2])
# load LFP data
basefolder ='/Users/rdgao/Documents/data/CRCNS/pfc2/EE.049/EE.049'
lfp_file = basefolder+ '_LFP.mat'
lfp_struct = sp.io.loadmat(lfp_file, squeeze_me=True)
data = lfp_struct['lfp'][:96,:]
t = lfp_struct['t']
fs = lfp_struct['fs'] #1250
# load spike data
spike_file = basefolder + '_Behavior.mat'
spike_struct = sp.io.loadmat(spike_file, squeeze_me=True)
cell_info = spike_struct['spikeph']
spike_ind = spike_struct['spikeind']
spike_t = spike_struct['spiket'] # 20kHz
# organize spikes into cells, shanks, and areas
# spkt_c, spkt_sh, spkt_re = organize_spikes(spike_t, spike_ind, cell_info)
# _, cells_per_shank = np.unique(cell_info[:,1], return_counts=True)
nperseg= 1250
noverlap= int(nperseg/2)
N_skip=25
filt_bands = [(4,12), (24,40), (150,200)]
#filt_bands = [(0,4),(6,12),(14,20),(30,40),(50,70),(70,100),(150,200)]
```
# Load results
```
data_loaded = np.load('../results/pfc2/EE049_filt.npz')
t_win = data_loaded['t_win']
spkstats_cell = data_loaded['spkstats_cell']
spkstats_shank = data_loaded['spkstats_shank']
spkstats_re = data_loaded['spkstats_re']
lfpstats_rw = data_loaded['lfpstats_rw']
lfpstats_summary = data_loaded['lfpstats_summary']
sigpower_all = data_loaded['sigpower_all']
filt_bands = data_loaded['filt_bands']
# get task and rest indices
rest_inds = []
task_inds = []
for i in range(spike_struct['SessionNP'].shape[0]):
rest_inds.append(np.where(np.logical_and(t_win >= spike_struct['SessionNP'][i,0],t_win <= spike_struct['SessionNP'][i,1]))[0])
task_inds.append(np.where(np.logical_and(t_win >= spike_struct['SessionNP'][i,1],t_win <= spike_struct['SessionNP'][i,2]))[0])
rest_inds = np.concatenate(rest_inds)
task_inds = np.concatenate(task_inds)
```
# Visualizing results
### Computing correlations
```
corr_labels = ['Per Shank', 'All PFC Cells', 'All CA1 Cells']
labels = ['Cell_FR','Cell_ISICV','LFP_PWR','LFP_SCV', 'LFP_KS']
sub_inds = np.arange(len(t_win))[2:-2:] # all indices
sub_inds = task_inds
shank_corr = []
pfc_corr = []
ca1_corr = []
for sh in range(spkstats_shank.shape[0]):
# 2 LFP channels per shank
for chan in range(2):
shank_corr.append(compute_pairwise_corr(spkstats_shank[sh], lfpstats_rw[sh*2+chan], sub_inds))
pfc_corr.append(compute_pairwise_corr(spkstats_re[0], lfpstats_rw[sh*2+chan], sub_inds))
ca1_corr.append(compute_pairwise_corr(spkstats_re[1], lfpstats_rw[sh*2+chan], sub_inds))
# features, features, freq, shank
shank_corr = np.stack(shank_corr, axis=3)
pfc_corr = np.stack(pfc_corr, axis=3)
ca1_corr = np.stack(ca1_corr, axis=3)
all_corr = [shank_corr,pfc_corr,ca1_corr]
plt.figure(figsize=(18,9))
for freq in range(lfpstats_rw.shape[2]):
for ind in range(3):
rho, pv = utils.corrcoefp(lfpstats_rw[:,sub_inds,freq,ind])
plt.subplot(3,7,ind*7+freq+1)
utils.corr_plot(rho,bounds=[-1,1])
plt.xticks([])
plt.yticks([])
plt.title(labels[ind+2]+'%i-%iHz'%(filt_bands[freq][0],filt_bands[freq][1]) )
plt.tight_layout()
# FR-ISI
plt.figure(figsize=(15,8))
for ind in range(3):
plt.subplot(2,3,ind+1)
plot_corrs(all_corr[ind],f1=0,f2=1,labels=labels, YL=(-0.5, 0.8))
plt.ylabel(corr_labels[ind])
plt.legend(filt_bands)
plt.subplot(2,3,4)
plot_corrs(all_corr[0],f1=2,f2=3,labels=labels, YL=(-1, 0.8))
plt.subplot(2,3,5)
plot_corrs(all_corr[0],f1=2,f2=4,labels=labels, YL=(-1, 0.8))
plt.subplot(2,3,6)
plot_corrs(all_corr[0],f1=3,f2=4,labels=labels, YL=(-1, 0.8))
plt.tight_layout()
plot_freq = [0,1,2,3,4,5,6]
plt.figure(figsize=(18,10))
for corr_ind in range(3):
for i,f1 in enumerate([0,1]):
for j,f2 in enumerate([2,3,4]):
plt.subplot(3,6,i*3+j+1+corr_ind*6)
plot_corrs(all_corr[corr_ind],f1,f2,labels, freq=plot_freq)
plt.yticks([])
plt.subplot(3,6,1+corr_ind*6)
plt.ylabel(corr_labels[corr_ind], fontsize=18)
plt.yticks([-0.5,0.5])
# plt.legend(filt_bands[plot_freq])
plt.tight_layout()
```
| github_jupyter |
# Look-Aside Cache for MongoDB
### This is a sample notebook for using Aerospike as a read/look-aside cache
- This notebook demonstrates the use of Aerospike as a cache using Mongo as another primary datastore
- It is required to run Mongo as a separate container using `docker run --name some-mongo -d mongo:latest`
To test: Run the `get_data(key, value)` method once - to fetch from Mongo and populate Aerospike
Another run will fetch the data from Aerospike cache
#### Ensure that the Aerospike Database is running
```
!asd >& /dev/null
!pgrep -x asd >/dev/null && echo "Aerospike database is running!" || echo "**Aerospike database is not running!**"
```
#### Import all dependencies
```
import aerospike
import pymongo
from pymongo import MongoClient
import sys
```
## Configure the clients
The configuration is for
- Aerospike database running on port 3000 of localhost (IP 127.0.0.1) which is the default.
- Mongo running in a separate container whose IP can be found by `docker inspect <containerid> | grep -i ipaddress`
Modify config if your environment is different (Aerospike database running on a different host or different port).
```
# Define a few constants
AEROSPIKE_HOST = "0.0.0.0"
AEROSPIKE_PORT = 3000
AEROSPIKE_NAMESPACE = "test"
AEROSPIKE_SET = "demo"
MONGO_HOST = "172.17.0.3"
MONGO_PORT = 27017
MONGO_DB = "test-database"
MONGO_COLLECTION = "test-collection"
#Aerospike configuration
aero_config = {
'hosts': [ (AEROSPIKE_HOST, AEROSPIKE_PORT) ]
}
try:
aero_client = aerospike.client(aero_config).connect()
except:
print("Failed to connect to the cluster with", aero_config['hosts'])
sys.exit(1)
print("Connected to Aerospike")
#Mongo configuration
try:
mongo_client = MongoClient(MONGO_HOST, MONGO_PORT)
print("Connected to Mongo")
except:
print("Failed to connect to Mongo")
sys.exit(1)
```
#### Store data in Mongo and clear the keys in Aerospike if any
```
# Handles to the Mongo database/collection used as the primary datastore.
db = mongo_client[MONGO_DB]
collection = db[MONGO_COLLECTION]

def store_data(data_id, data):
    """Store {data_id: data} in Mongo (the primary datastore).

    The collection is dropped first, so it ends up holding just this document.
    """
    m_data = {data_id: data}
    collection.drop()
    # Key of the matching Aerospike cache record (namespace, set, user key).
    aero_key = ('test', 'demo', data_id)
    #aero_client.remove(aero_key)
    post_id = collection.insert_one(m_data)

store_data("key", "value")
```
#### Fetch the data. In this instance we are using a simple key value pair.
If the data exists in the cache it is returned, if not data is read from Mongo, put in the cache and then returned
```
def get_data(data_id, data):
    """Look-aside cache read: serve from Aerospike if present, otherwise
    fetch from Mongo and write through to the Aerospike cache."""
    aero_key = (AEROSPIKE_NAMESPACE, AEROSPIKE_SET, data_id)
    #aero_client.remove(aero_key)
    # exists() returns (key, meta); meta is None when the record is absent.
    data_check = aero_client.exists(aero_key)
    if data_check[1]:
        # Cache hit: serve straight from Aerospike.
        (key, metadata, record) = aero_client.get(aero_key)
        print("Data retrieved from Aerospike cache")
        print("Record::: {} {}".format(data_id, record['value']))
    else:
        # Cache miss: read from Mongo, then populate the cache.
        mongo_data = collection.find_one({data_id: data})
        print("Data not present in Aerospike cache, retrieved from mongo {}".format(mongo_data))
        aero_client.put(aero_key, {'value': mongo_data[data_id]})

get_data("key", "value")
```
| github_jupyter |
# Fighting Game AI
## Introduction
Do you know about Pokemon? Well this turn-based fighting game is similar to Pokemon, but we do not send out monsters to duke it out. Instead, the battle is between humans. Since the actions that can be taken by both players are the same, there is some form of game theory involved and we can determine the Nash equilibrium of the game through the use of Monte Carlo Tree Search (MCTS).
Players start with 100 health. For each turn, players can choose one of four actions available:
1. <b>Attack</b> <i>{DAMAGE SKILL}</i> (opponent is damaged for 8-12 health)
2. <b>Heal</b> <i>{HEAL SKILL}</i> (self is healed for 7-10 health)
3. <b>Power Up</b> <i>{BUFF SKILL}</i> (effectiveness of damage skills increase by 25%, to a maximum of 100%)
4. <b>Superspeed</b> <i>{BUFF SKILL}</i> (chance of dodging a damage skill or sudden death effect increases by 5%, to a maximum of 100%)
Starting from the 11th turn, sudden death will begin, and the players will be hit for 2 * (number_of_turns_passed - 10) health at the end of their turn. After turn 50, if both players have not fainted yet, the game is a draw. Players are able to dodge the effects of sudden death as well.
For buff skills, we will use the example of the skill "power up" to show how effects can be stacked. When the player has used "power up" twice, the effectiveness of damage skills is increased by 50%.
You can edit the various variables to see how the sequence of actions that lead to the best result would differ.
```
!pip install mcts_simple
import numpy as np
import random
from gym import Env
from gym.spaces import Discrete, Box
class Skill:
    # Base class for all skills; concrete skills override use() and call one
    # of the helper methods below.
    def __init__(self, player):
        self.name = None         # display name of the skill
        self.type = None         # "Damage", "Heal" or "Buff"
        self.description = None  # human-readable description
        self.player = player     # the Player owning this skill
    def use(self, opponent):
        """Apply this skill against `opponent`; overridden by subclasses."""
        pass
    def damage(self, opponent, damage):
        """Damage `opponent`, scaled by the owner's damage multiplier.
        The opponent may dodge the hit entirely (probability = dodge_rate)."""
        if random.random() < opponent.dodge_rate:
            if self.player.output:
                print(f"Player {opponent.name} dodged the incoming attack.")
        else:
            original_health = opponent.health
            # Health is clamped at 0.
            opponent.health = max(opponent.health - int(damage * self.player.damage_multiplier), 0)
            if self.player.output:
                print(f"Player {opponent.name} is hit for {original_health - opponent.health} damage by the skill {self.name}.")
    def heal(self, damage):
        """Heal the owner by `damage`, capped at 100 health."""
        original_health = self.player.health
        self.player.health = min(self.player.health + damage, 100)
        if self.player.output:
            print(f"Player {self.player.name} is healed for {self.player.health - original_health} damage using the skill {self.name}.")
    def increase_damage(self, multiplier):
        """Raise the owner's damage multiplier, clamped to [0, 2]."""
        original_damage_multiplier = self.player.damage_multiplier
        self.player.damage_multiplier = max(min(self.player.damage_multiplier + multiplier, 2), 0)
        if self.player.output:
            print(f"Player {self.player.name}'s damage has increased by {self.player.damage_multiplier - original_damage_multiplier:.0%} using the skill {self.name}.")
    def increase_dodge(self, dodge_rate):
        """Raise the owner's dodge rate, clamped to [0, 1]."""
        original_dodge_rate = self.player.dodge_rate
        self.player.dodge_rate = max(min(self.player.dodge_rate + dodge_rate, 1), 0)
        if self.player.output:
            print(f"Player {self.player.name}'s dodge rate has increased by {self.player.dodge_rate - original_dodge_rate:.0%} using the skill {self.name}.")
class Attack(Skill):
    """Damage skill: hits the opponent for 8-12 (pre-multiplier) health."""
    def __init__(self, player):
        self.player = player
        self.name = "Attack"
        self.type = "Damage"
        self.description = "Opponent is hit for 8-12 health."
    def use(self, opponent):
        roll = random.randint(8, 12)
        self.damage(opponent, roll)
class Heal(Skill):
    """Heal skill: restores 7-10 health to the user."""
    def __init__(self, player):
        self.player = player
        self.name = "Heal"
        self.type = "Heal"
        self.description = "Player is healed for 7-10 health."
    def use(self, opponent):
        amount = random.randint(7, 10)
        self.heal(amount)
class PowerUp(Skill):
    """Buff skill: +25% damage effectiveness, capped at +100%."""
    def __init__(self, player):
        self.player = player
        self.name = "Power Up"
        self.type = "Buff"
        self.description = "Effectiveness of damage skills increases by 25%. Damage multiplier can be increased to a maximum of 100%."
    def use(self, opponent):
        self.increase_damage(0.25)
class Superspeed(Skill):
    """Buff skill: +5% dodge chance, capped at 100%."""
    def __init__(self, player):
        self.player = player
        self.name = "Superspeed"
        self.type = "Buff"
        self.description = "Chance of dodging damage skills or effect from sudden death increases by 5%. Dodge rate can be increased to a maximum of 100%."
    def use(self, opponent):
        self.increase_dodge(0.05)
class Player:
    # A combatant: 100 starting health, the four skills, and buff state.
    def __init__(self, number, player_name, output = False):
        self.number = number        # player index (1 or 2)
        self.name = player_name     # display name used in the battle log
        # Skill order matches the environment's action indices 0-3.
        self.skills = [Attack(self), Heal(self), PowerUp(self), Superspeed(self)]
        self.health = 100           # clamped to [0, 100] by the skill helpers
        self.damage_multiplier = 1  # raised by PowerUp, capped at 2
        self.dodge_rate = 0         # raised by Superspeed, capped at 1
        self.output = output        # whether to print the battle log
    def has_fainted(self):
        """True when the player has no health left."""
        return self.health <= 0
class FightingGameEnv(Env): # Multi agent environment
    """Two-player turn-based fighting game with a gym-style multi-agent API.

    Players alternate turns.  From turn 11 onwards the acting player takes
    growing sudden-death damage at the end of its turn.  Rewards are +1 for
    the winner and -1 for the player who faints.
    """
    def __init__(self, output = False):
        # Output
        self.output = output
        # Players
        self.player_1 = Player(1, "1", self.output)
        self.player_2 = Player(2, "2", self.output)
        self.agents = [self.player_1, self.player_2]
        # Maps each agent object to its index (0 or 1); XOR-ing the index
        # with 1 yields the opponent's index.
        self.agent_mapping = dict(zip(self.agents, list(range(len(self.agents)))))
        self.agent_selection = self.agents[0]
        # Actions: 0=Attack, 1=Heal, 2=Power Up, 3=Superspeed
        self.action_spaces = {agent: Discrete(4) for agent in self.agents}
        # Observations {}: set of int, []: range of float
        # <<<player: {1, 2}, turn_number: {1, ..., 50}, sudden_death: {0, 1}, sudden_death_damage: {0, ..., 80},
        # player_1_health: {0, ..., 100}, player_2_health: {0, ..., 100}, player_1_damage_multiplier: [1, 2],
        # player_2_damage_multiplier: [1, 2], player_1_dodge_rate: [0, 1], player_2_dodge_rate: [0, 1]>>>
        self.observation_spaces = {agent: Box(np.array([1., 1., 0., 0., 0., 0., 1., 1., 0., 0.], dtype = np.float64), np.array([1., 50., 1., 80., 100., 100., 2., 2., 1., 1.], dtype = np.float64), dtype = np.float64) for agent in self.agents}
        # Parameters
        self.turn_number = 1
        self.episode_length = 50
        self.sudden_death_damage = 2  # per-turn damage grows by this amount past turn 10
        # Things to return
        self.observations = {agent: self.get_state() for agent in self.agents}
        self.actions = {agent: None for agent in self.agents}
        self.rewards = {agent: 0 for agent in self.agents} # DO NOT RETURN THIS
        self.cumulative_rewards = {agent: 0 for agent in self.agents}
        self.dones = {agent: False for agent in self.agents}
        self.infos = {agent: {} for agent in self.agents}
    def render(self, mode = "human"):
        """Print the current turn and both players' stats (when output is on)."""
        if self.output:
            print("Turn:", self.turn_number)
            print(f"Player {self.agents[0].name} health: {self.agents[0].health}")
            print(f"Player {self.agents[0].name} damage increase: {self.agents[0].damage_multiplier - 1:.0%}")
            print(f"Player {self.agents[0].name} dodge rate: {self.agents[0].dodge_rate:.0%}")
            print(f"Player {self.agents[1].name} health: {self.agents[1].health}")
            print(f"Player {self.agents[1].name} damage increase: {self.agents[1].damage_multiplier - 1:.0%}")
            print(f"Player {self.agents[1].name} dodge rate: {self.agents[1].dodge_rate:.0%}")
    def get_state(self):
        """Return the 10-element observation vector described in observation_spaces."""
        return np.array([self.agent_mapping[self.agent_selection] + 1,
                         self.turn_number,
                         1 if self.turn_number > 10 else 0,
                         self.sudden_death_damage * max(self.turn_number - 10, 0),
                         self.player_1.health,
                         self.player_2.health,
                         self.player_1.damage_multiplier,
                         self.player_2.damage_multiplier,
                         self.player_1.dodge_rate,
                         self.player_2.dodge_rate],
                        dtype = np.float64)
    def reset(self):
        self.__init__(self.output) # reset classes made
    def step(self, action):
        """Apply `action` for the currently selected agent and advance the game."""
        # Agent's action
        self.actions[self.agent_selection] = action
        # Reset per-step rewards for BOTH agents.
        # BUG FIX: the original loop ignored its loop variable and reset only
        # the acting agent, so the idle agent's stale reward from the previous
        # step was accumulated again below.
        for agent in self.rewards:
            self.rewards[agent] = 0
        # Reset the acting agent's cumulative reward (PettingZoo-style).
        self.cumulative_rewards[self.agent_selection] = 0
        # Agent takes action.  (The previous-health locals tracked here were
        # never used and have been removed.)
        self.agent_selection.skills[action].use(self.agents[self.agent_mapping[self.agent_selection] ^ 1])
        self.infos[self.agent_selection]["action"] = action # logging purposes
        # Sudden death: from turn 11 the acting player is damaged at the end
        # of its turn (skipped once the opponent has fainted); can be dodged.
        if not self.agents[self.agent_mapping[self.agent_selection] ^ 1].has_fainted(): # if opponent has not fainted
            sudden_death_damage = self.sudden_death_damage * max(self.turn_number - 10, 0)
            if sudden_death_damage:
                if random.random() < self.agent_selection.dodge_rate:
                    if self.output:
                        print(f"Player {self.agent_selection.name} dodged the sudden death effect.")
                else:
                    temp_health = self.agent_selection.health
                    self.agent_selection.health = max(self.agent_selection.health - sudden_death_damage, 0)
                    if self.output:
                        print(f"Player {self.agent_selection.name} has been hit for {temp_health - self.agent_selection.health} health by sudden death!")
        # Calculate rewards: +1 to the survivor, -1 to the fainted player.
        if self.agent_selection.has_fainted():
            self.rewards[self.agent_selection] -= 1
            self.rewards[self.agents[self.agent_mapping[self.agent_selection] ^ 1]] += 1
        elif self.agents[self.agent_mapping[self.agent_selection] ^ 1].has_fainted():
            self.rewards[self.agent_selection] += 1
            self.rewards[self.agents[self.agent_mapping[self.agent_selection] ^ 1]] -= 1
        # Determine episode completion
        if self.agent_mapping[self.agent_selection] == 0: # PLAYER 1
            self.dones = {agent: self.player_1.has_fainted() or self.player_2.has_fainted() for agent in self.agents}
        elif self.agent_mapping[self.agent_selection] == 1: # PLAYER 2
            self.turn_number += 1
            self.dones = {agent: self.turn_number >= self.episode_length or self.player_1.has_fainted() or self.player_2.has_fainted() for agent in self.agents} # check for turn number only applies at end of second player's turn
        # Selects the next agent
        self.agent_selection = self.agents[self.agent_mapping[self.agent_selection] ^ 1]
        # Next agent's observation
        self.observations[self.agent_selection] = self.get_state()
        # Update rewards
        for agent, reward in self.rewards.items():
            self.cumulative_rewards[agent] += reward
        # Output next line
        if self.output:
            print()
from mcts_simple import Game
from copy import deepcopy
class FightingGame(Game):
    """mcts_simple Game adapter wrapping FightingGameEnv.

    Keeps a deep copy of the environment before every action so that the
    last action can be undone during tree search.
    """
    def __init__(self, output = False):
        self.env = FightingGameEnv(output)
        self.prev_env = None
    def render(self):
        self.env.render()
    def get_state(self):
        return tuple(self.env.get_state())
    def number_of_players(self):
        return len(self.env.agents)
    def current_player(self):
        return self.env.agent_selection.name
    def possible_actions(self):
        # Actions are exposed to MCTS as the strings "0".."3".
        return [str(choice) for choice in range(4)]
    def take_action(self, action):
        if action not in self.possible_actions():
            raise RuntimeError("Action taken is invalid.")
        # Snapshot the environment so delete_last_action() can roll back.
        self.prev_env = deepcopy(self.env)
        self.env.step(int(action))
    def delete_last_action(self):
        if self.prev_env is None:
            raise RuntimeError("No last action to delete.")
        if self.env.output:
            raise RuntimeError("Output to terminal should be disabled using output = False when deleting last action.")
        self.env, self.prev_env = self.prev_env, None
    def has_outcome(self):
        return any(self.env.dones.values())
    def winner(self):
        if not self.has_outcome():
            raise RuntimeError("winner() cannot be called when outcome is undefined.")
        first, second = self.env.player_1, self.env.player_2
        if second.has_fainted() or first.health > second.health:
            return first.name
        if first.has_fainted() or second.health > first.health:
            return second.name
        return None
## This example shows how Open loop MCTS deals with uncertainty ###
from mcts_simple import OpenLoopMCTS, OpenLoopUCT


def _train_and_export(algo_cls, iterations, path, banner):
    """Train a fresh tree-search agent on a silent game and persist it."""
    print(banner)
    agent = algo_cls(FightingGame(output = False))
    agent.run(iterations = iterations)
    agent._export(path)
    print()


def _load_and_play(algo_cls, path, banner, activation, vs_human = False):
    """Reload a saved tree onto a verbose game and run one demo episode."""
    print(banner)
    agent = algo_cls(FightingGame(output = True))
    agent._import(path)
    if vs_human:
        agent.play_with_human(activation = activation)
    else:
        agent.self_play(activation = activation)
    print()


# Export trained MCTS
_train_and_export(OpenLoopMCTS, 50000, "FightingGame_MCTS.json", "Export trained MCTS")
# Import trained MCTS
_load_and_play(OpenLoopMCTS, "FightingGame_MCTS.json", "Import trained MCTS", "best")
# Export trained UCT
_train_and_export(OpenLoopUCT, 100000, "FightingGame_UCT.json", "Export trained UCT")
# Import trained UCT
_load_and_play(OpenLoopUCT, "FightingGame_UCT.json", "Import trained UCT", "best")
# Play with UCT agent
_load_and_play(OpenLoopUCT, "FightingGame_UCT.json", "Play with UCT agent", "linear", vs_human = True)
```
| github_jupyter |
# 判断爬取下来的url是哪种类型的url
```
# Classify each crawled URL into one of four CSDN URL types.
CSDN_SEEDS_URL = "https://blog.csdn.net/"
REGEX_CSDN_USER_MY_URL = "http[s]*://my\\.csdn\\.net/\\w+"
REGEX_CSDN_USER_BLOG_URL = "http[s]*://blog\\.csdn\\.net/\\w+"
REGEX_CSDN_BLOG_LIST_URL = "http[s]*://blog\\.csdn\\.net/\\w+/article/list/\\d+\\?"
REGEX_CSDN_BLOG_URL = "http[s]*://blog\\.csdn\\.net/\\w+/article/details/\\w+"

with open("./urls.txt", "r", encoding="utf-8") as f:
    urls = f.readlines()
urls[0:5]

import re
from tqdm import tqdm

USER_MY_URL = []
USER_BLOG_URL = []
BLOG_LIST_URL = []
BLOG_URL = []

# IMPROVEMENT: compile each pattern once instead of re-parsing it for every
# URL, and use pattern truthiness instead of the non-idiomatic `!= None`
# comparison (PEP 8). Most-specific patterns are tested first because an
# article-details URL also matches the plain user-blog pattern.
_BLOG_PAT = re.compile(REGEX_CSDN_BLOG_URL)
_LIST_PAT = re.compile(REGEX_CSDN_BLOG_LIST_URL)
_MY_PAT = re.compile(REGEX_CSDN_USER_MY_URL)
_USER_PAT = re.compile(REGEX_CSDN_USER_BLOG_URL)

# This could be simplified to just two buckets: blog URLs vs. non-blog URLs.
for url in tqdm(urls):
    if _BLOG_PAT.match(url):
        BLOG_URL.append(url)
    elif _LIST_PAT.match(url):
        BLOG_LIST_URL.append(url)
    elif _MY_PAT.match(url):
        USER_MY_URL.append(url)
    elif _USER_PAT.match(url):
        USER_BLOG_URL.append(url)

print(len(USER_MY_URL))
print(len(USER_BLOG_URL))
print(len(BLOG_LIST_URL))
print(len(BLOG_URL))
```
# 测试GEN的效果
```
! pip install gne -i https://pypi.tuna.tsinghua.edu.cn/simple
with open('./test.html', 'r', encoding="utf-8") as f:
html = f.read()
print(html)
extractor = GeneralNewsExtractor().extractor.extract(html, with_body_html=True)
from gne import GeneralNewsExtractor
extractor = GeneralNewsExtractor()
result = extractor.extract(html)
print(result)
result["content"]
```
# 测试Python Goose
```
! pip install goose-extractor i https://pypi.tuna.tsinghua.edu.cn/simple
import goose
g = goose.Goose()
article = g.extract(raw_html=html)
print(article.title.encode('gbk', 'ignore'))
print(article.meta_description.encode('gbk', 'ignore'))
print(article.cleaned_text.encode('gbk', 'ignore'))
```
# 使用xpath提取content
```
# XPath to extract the main content <div>: class contains "content" but not
# "comment".
# BUG FIX: the original nested `"comment" or ""` inside contains(); in XPath
# that sub-expression collapses to a boolean, so the not() clause never
# matched class names as intended. Bound to a name so later cells can use it.
CONTENT_XPATH = '//div[contains(@class, "content") and not(contains(@class, "comment"))]'
CONTENT_XPATH
```
# 测试html_extractor
```
# Smoke-test the project's MainContent extractor on a saved CSDN page.
from Engine.html_extractor import MainContent

extractor = MainContent()
url = "https://blog.csdn.net/hihell/article/details/121012464"
with open('./test.html', 'r', encoding="utf-8") as f:
    html = f.read()
# extract() returns a (title, content) pair — presumably plain text;
# confirm against Engine.html_extractor.
title,content = extractor.extract(url, html)
print(title, "\n" , "*"*40)
print(content)
```
# 判断url是否为内容界面
而不是列表界面或者主页又或者用户页等
```
# Strip the scheme/domain parts of each URL first.
# Rough frequency count of keywords appearing in URLs; a neural-network
# classifier could be tried later.
import jieba
import nltk
from tqdm import tqdm
import re
import pandas as pd

with open('./csdn_urls.txt', 'r', encoding="utf-8") as f:
    csdn_urls = f.readlines()
with open('./juejin_urls.txt', 'r', encoding="utf-8") as f:
    juejin_urls = f.readlines()
with open('./sg_urls.txt', 'r', encoding="utf-8") as f:
    sg_urls = f.readlines()
urls = sg_urls + csdn_urls + juejin_urls
print(len(urls))

# word -> occurrence count across all URLs
dic_words = {}
# nltk cannot split on '\' (kept for reference):
# for url in tqdm(urls):
#     text = nltk.word_tokenize(url)
#     for word in text:
#         dic_words[word] = dic_words.get(word, 0) + 1
# jieba variant (kept for reference):
# for url in tqdm(urls):
#     text = jieba.cut(url)
#     for word in text:
#         dic_words[word] = dic_words.get(word, 0) + 1
# Simple approach that works: runs of lowercase letters are the "words".
for url in tqdm(urls):
    text = re.findall('[a-z]+', url.lower())
    for word in text:
        dic_words[word] = dic_words.get(word, 0) + 1

df = pd.DataFrame.from_dict(dic_words, orient="index")
print(df.sort_values(by=[0],na_position='last'))
df.to_csv("url_keywords.csv")

# Words that add/subtract score when ranking a URL.
# The sorted word table above can be reviewed by hand to extend these sets
# with further positive/negative keywords.
NEG_WORDS = ['user', 'list', 'authors', 'comment','writers','blogs']
POS_WORDS = ['article', 'blog', 'details']
# Reference: https://help.aliyun.com/document_detail/65096.html
# Matching must be case-insensitive, and only the last four characters of the
# URL need to be checked.
# Any of these file-name suffixes immediately disqualifies a URL (static
# asset, not a content page).
FILE_WORDS = ['gif','png','bmp','jpeg','jpg', 'svg',
              'mp3','wma','flv','mp4','wmv','ogg','avi',
              'doc','docx','xls','xlsx','ppt','pptx','txt','pdf',
              'zip','exe','tat','ico','css','js','swf','apk','m3u8','ts']
# URLs containing a long run of digits are usually content pages —
# as long as the run is not part of a file-name suffix.
# Matching a run of consecutive digits:
import re

test_url = "https://blog.csdn.net/ITF_001?utm_source=feed"
test_str = "982374589234789"
test_str1 = "sdfdsfsdfsdf232sdfsdf"
# "[0-9]" * 8 builds the literal pattern "[0-9][0-9]...": eight leading digits.
rule1 = "[0-9]"*8
print(re.match(rule1, test_str, flags=0))
```
# 将爬取下来的相对URL链接转换为绝对链接
```
# Convert crawled relative URL links into absolute links.
from urllib import parse

page_url = 'http://fcg.gxepb.gov.cn/ztzl/hjwfbgt/'
# BUG FIX (naming): the original bound the absolute URL to `new_relative_url`
# and the relative path to `new_absolute_url`; the names are now accurate.
new_absolute_url = 'http://fcg.gxepb.gov.cn/hjzf/xzcf/201811/t20181102_46347.html'
new_relative_url = '../../hjzf/xzcf/201811/t20181102_46347.html'
# urljoin resolves a link against the page URL; an already-absolute URL is
# returned unchanged, so both calls yield the same result here.
new_full_url1 = parse.urljoin(page_url, new_absolute_url)
new_full_url2 = parse.urljoin(page_url, new_relative_url)
print(new_full_url1)
print(new_full_url2)
```
# 测试url_parser
```
# Smoke-test is_static_url over the first URLs of each crawl dump.
from Engine.url_parser import is_static_url

with open('./csdn_urls.txt', 'r', encoding="utf-8") as f:
    csdn_urls = f.readlines()
with open('./juejin_urls.txt', 'r', encoding="utf-8") as f:
    juejin_urls = f.readlines()
with open('./sg_urls.txt', 'r', encoding="utf-8") as f:
    sg_urls = f.readlines()
urls = sg_urls + csdn_urls + juejin_urls
print(len(urls))

# BUG FIX: the original indexed range(8000) blindly, which raises IndexError
# whenever fewer than 8000 URLs were loaded; slicing is safe either way.
for url in urls[:8000]:
    if is_static_url(url):
        print(url)
```
# 测试gerapy_auto_extractor
```
import sys
sys.path.append("C:/My_app/code/咻Search/Engine")
from gerapy_auto_extractor.classifiers.list import is_list
from gerapy_auto_extractor.classifiers.detail import is_detail

with open("./test.html", 'r', encoding='utf-8') as f:
    html= f.read()
# These two classifiers proved usable in testing.
print(is_detail(html,threshold=0.3))
print(is_list(html,threshold=0.9))

import sys
sys.path.append("C:/My_app/code/咻Search/Engine")
from gerapy_auto_extractor.extractors.content import extract_content
from gerapy_auto_extractor.extractors.datetime import extract_datetime
from gerapy_auto_extractor.extractors.list import extract_list
from gerapy_auto_extractor.extractors.title import extract_title

with open("./test.html", 'r', encoding='utf-8') as f:
    html= f.read()
# print(extract_title(html))
# print(extract_list(html))
print(extract_datetime(html))
# The content extractor performed very poorly in testing, so it stays disabled:
# content_html = extract_content(html)

# Scratch expressions left over from interactive exploration:
{"hah":"hello","D": 123}.get("hah","")
print("zhenghui" or "郑辉")
{"hah":"hello","D": 123}.get("hah","")
```
# 测试查出mysql数据
```
# Query crawled page data out of MySQL into DataFrames.
import pandas as pd
import pymysql
from config import MYSQL_HOST, MYSQL_DBNAME, MYSQL_USER, MYSQL_PASSWORD

# charset='utf8' avoids 'latin-1' encoding errors on non-ASCII text.
conn = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASSWORD,
                       db=MYSQL_DBNAME, charset='utf8')
# One cursor per result set.
cursor_blogs = conn.cursor()
cursor_list = conn.cursor()
sql_blogs = 'SELECT page_url, urls FROM search_blogs;'
# NOTE(review): this queries the same table as sql_blogs — probably a
# list-page table was intended; confirm the table name before relying on it.
sql_list = 'SELECT page_url, urls FROM search_blogs;'
cursor_blogs.execute(sql_blogs)
cursor_list.execute(sql_list)
# Column metadata for each result set.
col_blogs = cursor_blogs.description
col_list = cursor_list.description
# Full result sets as tuples (fetchone() would return a single row instead).
re_blogs = cursor_blogs.fetchall()
re_list = cursor_list.fetchall()
# Column names come from the cursor description; row tuples become frames.
columns_blogs = pd.DataFrame(list(col_blogs))
df_blogs = pd.DataFrame(list(re_blogs), columns=columns_blogs[0])
# BUG FIX: the original built the "list" frame from col_blogs/re_blogs
# (copy-paste); it now uses the list cursor's own metadata and rows.
columns_list = pd.DataFrame(list(col_list))
df_list = pd.DataFrame(list(re_list), columns=columns_list[0])

import ast
blogs_index = [url[0] for url in re_blogs]
# Each `urls` column value is a Python-literal string; parse it back to a list.
# NOTE(review): built from re_blogs despite the name — the list rows are
# handled properly inside urls2G() below.
list_index = [ast.literal_eval(url[1]) for url in re_blogs]
list_index[0][0]
import numpy as np


def urls2G():
    """Build the link-graph adjacency matrix from URLs stored in MySQL.

    Each crawled page's `urls` column holds a Python-literal list of outgoing
    links; G[i][j] == 1 means page i links to page j, recorded only for
    target pages that were themselves crawled.
    """
    # charset='utf8' avoids 'latin-1' encoding errors on non-ASCII text.
    conn = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASSWORD,
                           db=MYSQL_DBNAME, charset='utf8')
    cursor_blogs = conn.cursor()
    cursor_list = conn.cursor()
    sql_blogs = 'SELECT page_url, urls FROM search_blogs;'
    # NOTE(review): same table as sql_blogs — likely a list-page table was meant.
    sql_list = 'SELECT page_url, urls FROM search_blogs;'
    cursor_blogs.execute(sql_blogs)
    cursor_list.execute(sql_list)
    re_blogs = cursor_blogs.fetchall()
    re_list = cursor_list.fetchall()
    # Turn the fetched tuples into graph nodes (page URL) and edge lists.
    blogs_index = [url[0] for url in re_blogs]
    blogs_point = [ast.literal_eval(url[1]) for url in re_blogs]
    list_index = [url[0] for url in re_list]
    list_point = [ast.literal_eval(url[1]) for url in re_list]
    indexs = blogs_index + list_index
    points = blogs_point + list_point
    # IMPROVEMENT: O(1) URL -> row lookup instead of list.index() inside the
    # edge loop (which was O(n) per link and used a bare `except:`).
    # setdefault keeps the FIRST occurrence, matching list.index() semantics.
    row_of = {}
    for i, u in enumerate(indexs):
        row_of.setdefault(u, i)
    G = np.zeros((len(indexs), len(indexs)))
    for i, index in enumerate(indexs):
        # Some linked URLs (ads etc.) were never crawled; those are skipped.
        for p_url in points[i]:
            p_index = row_of.get(p_url, -1)
            if p_index != -1:
                G[i][p_index] = 1
    return G


urls2G()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/alos_global_dsm.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/alos_global_dsm.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Terrain/alos_global_dsm.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/alos_global_dsm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# Authenticate once per machine; uncomment on first run or after auth errors.
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US; setOptions comes from geehydro,
# which patches Earth-Engine helpers onto folium maps.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# ALOS World 3D 30 m digital surface model; 'AVE' band (presumably the
# averaged elevation product — confirm in the dataset catalog).
dataset = ee.Image('JAXA/ALOS/AW3D30_V1_1')
elevation = dataset.select('AVE')
# Stretch 0–4000 across a blue -> cyan -> yellow -> red -> white palette.
elevationVis = {
    'min': 0.0,
    'max': 4000.0,
    'palette': ['0000ff', '00ffff', 'ffff00', 'ff0000', 'ffffff'],
}
Map.setCenter(136.85, 37.37, 4)  # center on Japan at zoom 4
Map.addLayer(elevation, elevationVis, 'Elevation')
```
## Display Earth Engine data layers
```
# Enable layer control, fullscreen button, and lat/lng popup, then display.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
```
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
from rcnn import config, data_generators
from rcnn import losses as losses
import rcnn.roi_helpers as roi_helpers
from keras.utils import generic_utils
from keras.layers import TimeDistributed, Lambda
import tensorflow as tf
from rcnn.clstm import clstm
# TF1-style session shared with Keras so raw-TF ops and Keras layers coexist.
sess = tf.Session()
K.set_session(sess)
sys.setrecursionlimit(40000)

parser = OptionParser()  # NOTE(review): unused — the options below are hard-coded

# Hard-coded "command line" options.
video_path = './videos'
annotation_path = './annotations'
num_rois = 32
num_epochs = 2000
config_filename = 'config.pickle'
output_weight_path = './model_frcnn.hdf5'
input_weight_path = None

from rcnn.video_parser import get_data

# Faster-RCNN configuration; all data augmentation is disabled.
C = config.Config()
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
C.model_path = output_weight_path
C.num_rois = int(num_rois)

from rcnn import simple_nn as nn
C.network = 'simple_nn'

# check if weight path was passed via command line
if input_weight_path:
    C.base_net_weights = input_weight_path

all_videos, classes_count, class_mapping = get_data(video_path, annotation_path)

# Ensure a background class exists for the classifier head.
if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_mapping['bg'] = len(class_mapping)

C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}

print('Training images per class:')
pprint.pprint(classes_count)
print('Num classes (including bg) = {}'.format(len(classes_count)))

# Persist the config so test time can reproduce the exact settings.
config_output_filename = config_filename
with open(config_output_filename, 'wb') as config_f:
    pickle.dump(C,config_f)
    print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(config_output_filename))

random.shuffle(all_videos)
num_imgs = len(all_videos)

#train_videos = [s for s in all_videos if s['imageset'] == 'trainval']
#val_videos = [s for s in all_videos if s['imageset'] == 'test']
# NOTE(review): train and val share the same data here — no held-out split.
train_videos = all_videos
val_videos = all_videos

print('Num train samples {}'.format(len(train_videos)))
print('Num val samples {}'.format(len(val_videos)))

data_gen_train = data_generators.video_streamer(train_videos, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
data_gen_val = data_generators.video_streamer(val_videos, classes_count, C, nn.get_img_output_length,K.image_dim_ordering(), mode='val')

input_shape_img = (None, None, None, 3)
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)

# Graph inputs: batched 5-D videos plus per-frame RPN targets (cls + regr).
video_input = tf.placeholder(tf.float32, [None,None,None,None,3])
rpn_target_cls = tf.placeholder(tf.float32, [None,None,None,None,2*num_anchors])
rpn_target_reg = tf.placeholder(tf.float32, [None,None,None,None,2*num_anchors*4])
#roi_input = Input(shape=(None, None, 4))
nb_clstm_filter = 40
def time_broadcast(f, x):
    """Apply a per-frame op `f` across a 5-D (videos, frames, H, W, C) tensor.

    Frames are folded into the batch axis, `f` runs once over the flat batch,
    and the result is unfolded back to five dimensions.
    """
    in_shape = tf.shape(x)
    n_videos, n_frames = in_shape[0], in_shape[1]
    frames = tf.reshape(x, [-1, in_shape[2], in_shape[3], in_shape[4]])
    mapped = f(frames)
    out_shape = tf.shape(mapped)
    return tf.reshape(mapped, [n_videos, n_frames, out_shape[1], out_shape[2], out_shape[3]])
def build_shared(video_input):
    """Shared feature extractor: per-frame CNN base followed by a ConvLSTM."""
    with tf.name_scope('shared_layers'):
        base = nn.nn_base(trainable=True)
        # Apply the frame-level CNN across time, then fuse temporally.
        shared_layers = time_broadcast(base, video_input)
        num_channels = 64
        shared_layers = clstm(shared_layers,num_channels,nb_clstm_filter,3)
    return shared_layers


shared = build_shared(video_input)
def build_rpn(x):
    """Region Proposal Network over time-distributed shared features.

    Folds frames into the batch axis, applies the RPN heads, and unfolds the
    class / regression outputs back to 5-D.
    """
    with tf.name_scope('RPN'):
        # BUG FIX: the shape was taken from the global `shared` instead of the
        # parameter `x`. Harmless for the current call site (which passes
        # `shared`), but wrong for any other input; use the argument itself.
        shape = tf.shape(x)
        num_videos, num_frames, w, h, c = [shape[i] for i in range(5)]
        # The ConvLSTM output always has nb_clstm_filter channels; the reshape
        # needs this statically for the RPN convolutions.
        c = nb_clstm_filter
        time_flat = tf.reshape(x, [-1, w,h,c])
        y_cls, y_reg, _ = nn.rpn(num_anchors)(time_flat)
        shape = tf.shape(y_cls)
        _, w, h, c = [shape[i] for i in range(4)]
        y_cls = tf.reshape(y_cls, [num_videos, num_frames, w, h, c])
        y_reg = tf.reshape(y_reg, [num_videos, num_frames, w, h, c*4])
    return y_cls, y_reg


rpn = build_rpn(shared)
#classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
#model_rpn = Model(img_input, rpn[:2])
#model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
#model_all = Model([img_input, roi_input], rpn[:2] + classifier)

# Raw-TF training op for the RPN (the Keras models above stay disabled).
optimizer = tf.train.AdamOptimizer(0.001)
rpn_loss = losses.rpn_loss_regr(num_anchors)(rpn_target_reg, rpn[1]) \
    + losses.rpn_loss_cls(num_anchors)(rpn_target_cls, rpn[0])
rpn_train_op = optimizer.minimize(rpn_loss)


def run_rpn(X, Y):
    # One RPN optimisation step: Y[0] = class targets, Y[1] = regression targets.
    sess.run(rpn_train_op, {video_input: X, rpn_target_cls: Y[0], rpn_target_reg: Y[1]})


#model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
#model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
#model_all.compile(optimizer='sgd', loss='mae')

epoch_length = 1000
num_epochs = int(num_epochs)
iter_num = 0

# Rolling loss buffer: columns are rpn_cls, rpn_regr, det_cls, det_regr, det_acc.
# NOTE(review): this rebinds the imported `losses` MODULE name; the loss
# functions above were already called, so it works, but it is fragile.
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
start_time = time.time()

best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
print('Starting training')
vis = True

# Scratch cell left over from debugging:
from keras.layers import Convolution2D
Convolution2D()
# Interactive inspection of augmentation and RPN target generation.
import rcnn
import rcnn.data_augment as data_augment

img_data_aug, x_img = data_augment.augment(all_videos[0][0], C, False)
img_data_aug
x_img.shape
all_videos[0][0]

from rcnn.data_generators import get_new_img_size, calc_rpn, get_anchor
import cv2

anc = get_anchor(all_videos[0][0], 2, C, lambda x,y: [x,y], 'tf', mode='val')
# BUG FIX: the original cell contained the bare fragment `C.`, which is a
# SyntaxError; echo the config object instead.
C
cls, reg = calc_rpn(C, all_videos[0][0], 320, 320, 320, 320, lambda x,y: [x,y])
all_videos[0][0]
# NOTE(review): `X` is only defined once the training loop below has run.
X.shape
anc[0][0].shape, anc[1][0][0].shape

from matplotlib import pyplot as plt
plt.imshow(anc[0][0])
plt.show()
plt.imshow(anc[1][0][0].sum(axis=-1))#[:30, :30])
plt.show()
from __future__ import absolute_import
import numpy as np
import cv2
import random
import copy
from . import data_augment
import threading
import itertools
def union(au, bu, area_intersection):
    """Area of the union of boxes au and bu, given their intersection area.

    Boxes are (x1, y1, x2, y2) tuples/lists.
    """
    width_a, height_a = au[2] - au[0], au[3] - au[1]
    width_b, height_b = bu[2] - bu[0], bu[3] - bu[1]
    # Inclusion–exclusion: |A ∪ B| = |A| + |B| − |A ∩ B|.
    return width_a * height_a + width_b * height_b - area_intersection
def intersection(ai, bi):
    """Overlap area of boxes ai and bi ((x1, y1, x2, y2)); 0 if disjoint."""
    left = max(ai[0], bi[0])
    top = max(ai[1], bi[1])
    overlap_w = min(ai[2], bi[2]) - left
    overlap_h = min(ai[3], bi[3]) - top
    # A negative extent on either axis means the boxes do not overlap.
    return overlap_w * overlap_h if overlap_w >= 0 and overlap_h >= 0 else 0
def iou(a, b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    # Degenerate boxes (zero or negative extent) score 0 outright.
    if a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]:
        return 0.0
    overlap = intersection(a, b)
    # Tiny epsilon guards against division by zero.
    return float(overlap) / float(union(a, b, overlap) + 1e-6)
def get_new_img_size(width, height, img_min_side=600):
    """Scale (width, height) so the shorter side becomes img_min_side.

    Returns the resized (width, height), preserving the aspect ratio; the
    longer side is truncated to an int.
    """
    if width <= height:
        scale = float(img_min_side) / width
        return img_min_side, int(scale * height)
    scale = float(img_min_side) / height
    return int(scale * width), img_min_side
class SampleSelector:
    """Round-robin class balancer.

    Skips images until one containing the currently wanted class shows up,
    then advances the wanted class to the next one in the cycle.
    """

    def __init__(self, class_count):
        # Classes with zero samples can never be satisfied, so drop them.
        self.classes = [name for name in class_count.keys() if class_count[name] > 0]
        self.class_cycle = itertools.cycle(self.classes)
        self.curr_class = next(self.class_cycle)

    def skip_sample_for_balanced_class(self, img_data):
        """Return True if img_data lacks the class we currently want."""
        for bbox in img_data['bboxes']:
            if bbox['class'] == self.curr_class:
                # Wanted class found: advance the cycle and keep the sample.
                self.curr_class = next(self.class_cycle)
                return False
        return True
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function):
    """Build the RPN ground-truth tensors for one image.

    Returns (y_rpn_cls, y_rpn_regr): per-anchor validity/objectness targets
    and objectness-masked box-regression targets, each with a leading batch
    axis, channels-first.
    """
    downscale = float(C.rpn_stride)
    anchor_sizes = C.anchor_box_scales
    anchor_ratios = C.anchor_box_ratios
    num_anchors = len(anchor_sizes) * len(anchor_ratios)

    # calculate the output map size based on the network architecture
    (output_width, output_height) = img_length_calc_function(resized_width, resized_height)

    n_anchratios = len(anchor_ratios)

    # initialise empty output objectives
    y_rpn_overlap = np.zeros((output_height, output_width, num_anchors))
    y_is_box_valid = np.zeros((output_height, output_width, num_anchors))
    y_rpn_regr = np.zeros((output_height, output_width, num_anchors * 4))

    num_bboxes = len(img_data['bboxes'])

    # Book-keeping so every GT box ends up with at least one positive anchor.
    num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)
    best_anchor_for_bbox = -1*np.ones((num_bboxes, 4)).astype(int)
    best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32)
    best_x_for_bbox = np.zeros((num_bboxes, 4)).astype(int)
    best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32)

    # get the GT box coordinates, and resize to account for image resizing
    gta = np.zeros((num_bboxes, 4))
    for bbox_num, bbox in enumerate(img_data['bboxes']):
        gta[bbox_num, 0] = bbox['x1'] * (resized_width / float(width))
        gta[bbox_num, 1] = bbox['x2'] * (resized_width / float(width))
        gta[bbox_num, 2] = bbox['y1'] * (resized_height / float(height))
        gta[bbox_num, 3] = bbox['y2'] * (resized_height / float(height))

    # rpn ground truth
    for anchor_size_idx in range(len(anchor_sizes)):
        for anchor_ratio_idx in range(n_anchratios):
            anchor_x = anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][0]
            anchor_y = anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][1]

            for ix in range(output_width):
                # x-coordinates of the current anchor box
                x1_anc = downscale * (ix + 0.5) - anchor_x / 2
                x2_anc = downscale * (ix + 0.5) + anchor_x / 2

                # ignore boxes that go across image boundaries
                if x1_anc < 0 or x2_anc > resized_width:
                    continue

                for jy in range(output_height):
                    # y-coordinates of the current anchor box
                    y1_anc = downscale * (jy + 0.5) - anchor_y / 2
                    y2_anc = downscale * (jy + 0.5) + anchor_y / 2

                    # ignore boxes that go across image boundaries
                    if y1_anc < 0 or y2_anc > resized_height:
                        continue

                    # bbox_type indicates whether an anchor should be a target
                    bbox_type = 'neg'

                    # this is the best IOU for the (x,y) coord and the current anchor
                    # note that this is different from the best IOU for a GT bbox
                    best_iou_for_loc = 0.0

                    for bbox_num in range(num_bboxes):
                        # get IOU of the current GT box and the current anchor box
                        curr_iou = iou([gta[bbox_num, 0], gta[bbox_num, 2], gta[bbox_num, 1], gta[bbox_num, 3]], [x1_anc, y1_anc, x2_anc, y2_anc])
                        # calculate the regression targets if they will be needed
                        if curr_iou > best_iou_for_bbox[bbox_num] or curr_iou > C.rpn_max_overlap:
                            cx = (gta[bbox_num, 0] + gta[bbox_num, 1]) / 2.0
                            cy = (gta[bbox_num, 2] + gta[bbox_num, 3]) / 2.0
                            cxa = (x1_anc + x2_anc)/2.0
                            cya = (y1_anc + y2_anc)/2.0

                            tx = (cx - cxa) / (x2_anc - x1_anc)
                            ty = (cy - cya) / (y2_anc - y1_anc)
                            tw = np.log((gta[bbox_num, 1] - gta[bbox_num, 0]) / (x2_anc - x1_anc))
                            th = np.log((gta[bbox_num, 3] - gta[bbox_num, 2]) / (y2_anc - y1_anc))

                        if img_data['bboxes'][bbox_num]['class'] != 'bg':
                            # all GT boxes should be mapped to an anchor box, so we keep track of which anchor box was best
                            if curr_iou > best_iou_for_bbox[bbox_num]:
                                best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx]
                                best_iou_for_bbox[bbox_num] = curr_iou
                                best_x_for_bbox[bbox_num,:] = [x1_anc, x2_anc, y1_anc, y2_anc]
                                best_dx_for_bbox[bbox_num,:] = [tx, ty, tw, th]

                            # we set the anchor to positive if the IOU is >0.7 (it does not matter if there was another better box, it just indicates overlap)
                            if curr_iou > C.rpn_max_overlap:
                                bbox_type = 'pos'
                                num_anchors_for_bbox[bbox_num] += 1
                                # we update the regression layer target if this IOU is the best for the current (x,y) and anchor position
                                if curr_iou > best_iou_for_loc:
                                    best_iou_for_loc = curr_iou
                                    best_regr = (tx, ty, tw, th)

                            # if the IOU is >0.3 and <0.7, it is ambiguous and no included in the objective
                            if C.rpn_min_overlap < curr_iou < C.rpn_max_overlap:
                                # gray zone between neg and pos
                                if bbox_type != 'pos':
                                    bbox_type = 'neutral'

                    # turn on or off outputs depending on IOUs
                    if bbox_type == 'neg':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                    elif bbox_type == 'neutral':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 0
                    elif bbox_type == 'pos':
                        y_is_box_valid[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        y_rpn_overlap[jy, ix, anchor_ratio_idx + n_anchratios * anchor_size_idx] = 1
                        start = 4 * (anchor_ratio_idx + n_anchratios * anchor_size_idx)
                        y_rpn_regr[jy, ix, start:start+4] = best_regr

    # we ensure that every bbox has at least one positive RPN region
    for idx in range(num_anchors_for_bbox.shape[0]):
        if num_anchors_for_bbox[idx] == 0:
            # no box with an IOU greater than zero ...
            if best_anchor_for_bbox[idx, 0] == -1:
                continue
            y_is_box_valid[
                best_anchor_for_bbox[idx,0], best_anchor_for_bbox[idx,1], best_anchor_for_bbox[idx,2] + n_anchratios *
                best_anchor_for_bbox[idx,3]] = 1
            y_rpn_overlap[
                best_anchor_for_bbox[idx,0], best_anchor_for_bbox[idx,1], best_anchor_for_bbox[idx,2] + n_anchratios *
                best_anchor_for_bbox[idx,3]] = 1
            start = 4 * (best_anchor_for_bbox[idx,2] + n_anchratios * best_anchor_for_bbox[idx,3])
            y_rpn_regr[
                best_anchor_for_bbox[idx,0], best_anchor_for_bbox[idx,1], start:start+4] = best_dx_for_bbox[idx, :]

    # Move to channels-first and add the batch axis.
    y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1))
    y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0)

    y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1))
    y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0)

    y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1))
    y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0)

    pos_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 1, y_is_box_valid[0, :, :, :] == 1))
    neg_locs = np.where(np.logical_and(y_rpn_overlap[0, :, :, :] == 0, y_is_box_valid[0, :, :, :] == 1))

    num_pos = len(pos_locs[0])

    # one issue is that the RPN has many more negative than positive regions, so we turn off some of the negative
    # regions. We also limit it to 256 regions.
    num_regions = 256

    # BUG FIX (py2 -> py3): `num_regions/2` is a float under true division
    # (this module even imports __future__ division), and random.sample()
    # requires an integer sample count — use floor division.
    if len(pos_locs[0]) > num_regions // 2:
        val_locs = random.sample(range(len(pos_locs[0])), len(pos_locs[0]) - num_regions // 2)
        y_is_box_valid[0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs]] = 0
        num_pos = num_regions // 2

    if len(neg_locs[0]) + num_pos > num_regions:
        val_locs = random.sample(range(len(neg_locs[0])), len(neg_locs[0]) - num_pos)
        y_is_box_valid[0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs]] = 0

    y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1)
    y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1)

    return np.copy(y_rpn_cls), np.copy(y_rpn_regr)
class threadsafe_iter:
    """Takes an iterator/generator and makes it thread-safe by
    serializing call to the `next` method of given iterator/generator.
    """
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Only one thread may advance the wrapped iterator at a time.
        with self.lock:
            return next(self.it)

    # BUG FIX: the class only defined the Python 2 `next` method, so the
    # built-in next() and for-loops failed on Python 3. `__next__` is now the
    # real implementation; `next` is kept as a backwards-compatible alias.
    next = __next__
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.
    """
    import functools

    # IMPROVEMENT: functools.wraps preserves the wrapped function's
    # name/docstring so debugging and introspection stay useful.
    @functools.wraps(f)
    def g(*a, **kw):
        return threadsafe_iter(f(*a, **kw))
    return g
def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):
    """Generator yielding (image, [rpn_cls, rpn_regr], img_data_aug) samples.

    BUG FIX: the loop previously ended in `return`, so only the first sample
    was ever produced and the infinite-epoch `while True` was pointless; it
    now `yield`s, matching consumption via next() in the training loop.
    """
    # The following line is not useful with Python 3.5, it is kept for the legacy
    # all_img_data = sorted(all_img_data)

    while True:
        for img_data in all_img_data:
            try:
                # read in image, and optionally add augmentation
                if mode == 'train':
                    img_data_aug, x_img = data_augment.augment(img_data, C, augment=True)
                else:
                    img_data_aug, x_img = data_augment.augment(img_data, C, augment=False)

                (width, height) = (img_data_aug['width'], img_data_aug['height'])
                (rows, cols, _) = x_img.shape

                assert cols == width
                assert rows == height

                # get image dimensions for resizing
                (resized_width, resized_height) = get_new_img_size(width, height, C.im_size)

                # resize the image so that the smallest side has length C.im_size
                x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)

                try:
                    y_rpn_cls, y_rpn_regr = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    continue

                # Zero-center by mean pixel, and preprocess image
                x_img = x_img[:,:, (2, 1, 0)]  # BGR -> RGB
                x_img = x_img.astype(np.float32)
                x_img[:, :, 0] -= C.img_channel_mean[0]
                x_img[:, :, 1] -= C.img_channel_mean[1]
                x_img[:, :, 2] -= C.img_channel_mean[2]
                x_img /= C.img_scaling_factor

                x_img = np.transpose(x_img, (2, 0, 1))
                x_img = np.expand_dims(x_img, axis=0)

                # Scale only the regression-target half of the channels.
                y_rpn_regr[:, y_rpn_regr.shape[1]//2:, :, :] *= C.std_scaling

                if backend == 'tf':
                    x_img = np.transpose(x_img, (0, 2, 3, 1))
                    y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))
                    y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))

                yield np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug

            except Exception as e:
                # Best-effort: log the failure and move on to the next sample.
                print(e)
                continue
# Scratch: inspect a target tensor shape.
# NOTE(review): X and Y are only bound once the training loop below has run.
Y[1].shape
# Initialise all graph variables, then try a single RPN training step.
init = tf.global_variables_initializer()
sess.run(init)
run_rpn(X, Y)
print('Now gonna start training!')

for epoch_num in range(num_epochs):
    progbar = generic_utils.Progbar(epoch_length)
    print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))
    while True:
        try:
            # Periodic diagnostic: average positive-anchor count over the last epoch.
            if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
                mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
                rpn_accuracy_rpn_monitor = []
                print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
                if mean_overlapping_bboxes == 0:
                    print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
            print('Now gonna generate data!')
            X, Y, img_data = next(data_gen_train)
            print('Now gonna run train op!')
            #loss_rpn = model_rpn.train_on_batch(X, Y)
            run_rpn(X,Y)
            print('Success!')
            #P_rpn = model_rpn.predict_on_batch(X)
            # NOTE(review): P_rpn is never assigned (its producer is commented
            # out above), so the next line raises NameError every iteration
            # and control falls into the broad `except` at the bottom.
            R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)
            # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
            X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
            if X2 is None:
                rpn_accuracy_rpn_monitor.append(0)
                rpn_accuracy_for_epoch.append(0)
                continue
            # Last class column is background: 1 -> negative ROI, 0 -> positive.
            neg_samples = np.where(Y1[0, :, -1] == 1)
            pos_samples = np.where(Y1[0, :, -1] == 0)
            if len(neg_samples) > 0:
                neg_samples = neg_samples[0]
            else:
                neg_samples = []
            if len(pos_samples) > 0:
                pos_samples = pos_samples[0]
            else:
                pos_samples = []
            rpn_accuracy_rpn_monitor.append(len(pos_samples))
            rpn_accuracy_for_epoch.append((len(pos_samples)))
            use_detector = False
            if use_detector: #for first runs, do not use detection model
                # Balance the ROI minibatch: up to half positives, rest negatives.
                if C.num_rois > 1:
                    if len(pos_samples) < C.num_rois//2:
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
                    try:
                        selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
                    except:
                        selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)
                loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
            # NOTE(review): loss_rpn is never assigned either (its producer is
            # commented out above); run_rpn() returns None.
            losses[iter_num, 0] = loss_rpn[1]
            losses[iter_num, 1] = loss_rpn[2]
            if use_detector:
                losses[iter_num, 2] = loss_class[1]
                losses[iter_num, 3] = loss_class[2]
                losses[iter_num, 4] = loss_class[3]
            iter_num += 1
            if use_detector:
                progbar.update(iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])), ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))])
            else:
                progbar.update(iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1]))])
            # End of epoch: report means, checkpoint on improvement.
            if iter_num == epoch_length:
                loss_rpn_cls = np.mean(losses[:, 0])
                loss_rpn_regr = np.mean(losses[:, 1])
                if use_detector:
                    loss_class_cls = np.mean(losses[:, 2])
                    loss_class_regr = np.mean(losses[:, 3])
                    class_acc = np.mean(losses[:, 4])
                mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                rpn_accuracy_for_epoch = []
                if C.verbose:
                    print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
                    print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                    print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                    print('Loss RPN regression: {}'.format(loss_rpn_regr))
                    if use_detector:
                        print('Loss Detector classifier: {}'.format(loss_class_cls))
                        print('Loss Detector regression: {}'.format(loss_class_regr))
                    print('Elapsed time: {}'.format(time.time() - start_time))
                if not use_detector:
                    loss_class_cls = 0
                    loss_class_regr = 0
                curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                iter_num = 0
                start_time = time.time()
                if curr_loss < best_loss:
                    if C.verbose:
                        print('Total loss decreased from {} to {}, saving weights'.format(best_loss,curr_loss))
                    best_loss = curr_loss
                    # NOTE(review): model_all is never built (commented out
                    # above); this would raise NameError if reached.
                    model_all.save_weights(C.model_path)
                break
        except Exception as e:
            print('Exception: {}'.format(e))
            continue

print('Training complete, exiting.')
# Post-training scratch cells.
# NOTE(review): `data_gen` is undefined — presumably data_gen_train or data_gen_val.
X, Y, data = next(data_gen)
Y[1].shape

# Visualise the 8th frame and its summed regression targets.
from matplotlib import pyplot as plt
plt.imshow(X[0][7])
plt.show()
plt.imshow(Y[1][0][7].sum(axis=-1))
plt.show()

from keras.layers import TimeDistributed
# Re-parse the raw videos/annotations for inspection.
d = get_data('./videos/', './annotations/')
d[0][0]
len(d[0])
```
| github_jupyter |
# Runtime ≈ 1 minute
# This notebook completes the process of wrangling the text for EDA and other future analyses.
# The processing is performed in the following order:
* Scispacy - Acronyms
* General Cleaning
* Spacy - Lemmatization
```
# Mount Google Drive when running in Colab; fall back gracefully on a
# local Jupyter environment.
try:
    from google.colab import drive
    drive.mount('./drive/')
    # NOTE(review): '%cd drive/My \ Drive/...' — verify the escaped space
    # matches the actual Drive path on the Colab filesystem.
    %cd drive/My \ Drive/Text_Summarization
except:
    print("No Colab Environment")
import json
import pandas as pd
import numpy as np
import re
import seaborn as sns
import matplotlib
# Default figure size for all plots in this notebook.
matplotlib.rcParams["figure.figsize"] = (20, 7)
```
# Load Data
```
# Load the scraped article texts. The file has a 4-line header before the
# JSON payload, so skip those lines first.
with open("../Data/raw/Telehealth_article_texts.txt") as f:
    #Skip header
    for i in range(4):
        next(f)
    corpus = f.read()
# NOTE(review): strips newlines everywhere, including inside article text —
# presumably to repair raw line breaks embedded in the JSON strings; confirm.
dict_articles = json.loads(corpus.replace("\n",""))
# One row per article; the original dict keys are discarded after reset_index.
df_articles = pd.DataFrame.from_dict(dict_articles,orient="index",columns=["Content"]).reset_index().drop(columns=["index"])
df_articles.head()
# Article metadata keyed by "Index" (row-aligned with df_articles).
df_metadata = pd.read_excel("../Data/raw/Metadata_telehealth_article_key_2.25.xlsx",sheet_name="Tied_to_Notebook",index_col="Index")
df_metadata.head()
#Ensure both Indexes are of same type before merge
assert df_metadata.index.dtype == df_articles.index.dtype
#Merge dataframes
df_metadata = df_metadata.merge(df_articles,left_index=True,right_index=True,how="left")
df_metadata.head()
# Journal -> subfield crosswalk, indexed by journal name for direct lookup.
JournalCrosswalk = pd.read_excel('../Data/raw/JournalTitles.xlsx')
JournalCrosswalk.set_index('Journal', inplace=True)
JournalCrosswalk.head()
df_metadata.dtypes
# Simple length features for EDA.
df_metadata["Content_Length"] = df_metadata["Content"].apply(lambda text: len(text))
# str() guards against NaN abstracts (floats have no len()).
df_metadata["Abstract_Length"] = df_metadata["Abstract"].apply(lambda text: len(str(text)))
# Split keywords on whitespace and drop the first token ([1:] —
# presumably a leading label token; confirm against the raw data).
df_metadata["Parsed_Keywords"] = df_metadata["Keywords"].apply(lambda keywords: str(keywords).replace("\n\n"," ").split()[1:])
df_metadata["Parsed_Keywords_Length"] = df_metadata["Parsed_Keywords"].apply(lambda text: len(text))
# Fix a known typo present in the source metadata.
df_metadata["Journal Title"] = df_metadata["Journal Title"].replace('Psychological Servies', 'Psychological Services')
# Map each journal title to its research subfield via the JournalCrosswalk
# table; unmatched titles get the sentinel 'No Match'.
subfield = []
for journal_title in df_metadata['Journal Title']:
    #print(journal_title)
    try:
        if journal_title == 'Clinical Psychology: Science and Practice': # Journal is missing from Crosswalk
            subfield.append('Clinical & Counseling Psychology')
        else:
            subfield.append(JournalCrosswalk['Journal Subfield'][journal_title])
    # Narrowed from a bare `except:`: only a missing crosswalk entry should
    # fall through to 'No Match'; other errors should surface.
    except KeyError:
        subfield.append('No Match')
df_metadata["Subfield"] = subfield
#Have an idea of reference amount per document
# Count occurrences of "et al" as a rough proxy for citation density.
df_metadata["et_al_Count"] = df_metadata["Content"].apply(lambda text: len(list(re.finditer("et al",text))))
df_metadata.head()
#troubleshooting
#emental health
#df_metadata["Content"].iloc[2][4633:5000]
#df_metadata["Clean_Content"].iloc[2][4633:5000]
```
## Acronyms - Include as Vocabulary for Paper
```
#Sci Spacy
#!pip install scispacy
#!pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_sm-0.4.0.tar.gz
#Spacy org
#!pip install spacy
#!python3 -m spacy download en_core_web_sm
#!python3 -m spacy download en_core_web_md
```
## Source: https://youtu.be/2_HSKDALwuw?t=708
## Abbreviation Detector Works by:
## 1. Finding Parentheses
## 2. Look up to 10 words behind the bracket
## 3. Greedily choose definition: Look for words next to each other, that in the right order start with the letters in the acronym
```
#ScispaCy: Fast and Robust Models for Biomedical Natural Language Processing: https://www.semanticscholar.org/paper/ScispaCy%3A-Fast-and-Robust-Models-for-Biomedical-Neumann-King/de28ec1d7bd38c8fc4e8ac59b6133800818b4e29
#https://github.com/allenai/SciSpaCy
#ScispaCy: Fast and Robust Models for Biomedical Natural Language Processing: https://www.semanticscholar.org/paper/ScispaCy%3A-Fast-and-Robust-Models-for-Biomedical-Neumann-King/de28ec1d7bd38c8fc4e8ac59b6133800818b4e29
#https://github.com/allenai/SciSpaCy
import spacy
from scispacy.abbreviation import AbbreviationDetector
# Medium English model with scispacy's abbreviation detector added as a pipe.
nlp = spacy.load("en_core_web_md")
nlp.add_pipe("abbreviation_detector")
# Extra stopwords beyond spaCy defaults (contraction fragments, "eg", "PRON").
nlp.Defaults.stop_words |= {"PRON","ll","ve","eg"}
corpus = df_metadata["Content"]
# Parse every article; NER/parser/textcat are disabled for speed since only
# tokenization and abbreviation detection are needed here.
docs = list(nlp.pipe(corpus,disable=["ner","parser","textcat"]))
# abbreviation -> {"LongForm": expansion, "Document": set of doc indexes}
abrv_dict = dict()
for index,doc in enumerate(docs):
    for abrv in doc._.abbreviations:
        if str(abrv) not in abrv_dict.keys():
            abrv_dict[str(abrv)] = {"LongForm":str(abrv._.long_form),"Document":{index}}
        else:
            abrv_dict[str(abrv)]["Document"].add(index)
        #print(f"{abrv} \t ({abrv.start}, {abrv.end}) {abrv._.long_form} \t Document: {index}")
#Source https://stackoverflow.com/questions/22281059/set-object-is-not-json-serializable
def set_default(obj):
    """JSON serializer fallback for use as ``json.dump(..., default=set_default)``.

    Converts sets to lists (JSON has no set type). Any other unserializable
    object still raises TypeError, now with a descriptive message instead of
    a bare, message-less exception.
    """
    if isinstance(obj, set):
        return list(obj)
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
#Write original abbreviation dictionary (before manual corrections)
with open("../references/abbreviation_table.json","w") as f:
    json.dump(abrv_dict, f, indent = 4,default=set_default)
#Remove misidentified abbreviations
with open("../references/Incorrect_abbrev.json", "r") as f:
    misidentified_abrv = json.load(f)
# NOTE(review): pop() raises KeyError if a listed key is absent from
# abrv_dict; assumes Incorrect_abbrev.json only lists detected terms — confirm.
for key in misidentified_abrv.keys():
    abrv_dict.pop(key)
#Correct LongForm of abbreviations
with open("../references/Abbreviation_corrections.json", "r") as f:
    correction_abrv = json.load(f)
for key in abrv_dict.keys():
    if key in correction_abrv.keys():
        abrv_dict[key]["LongForm"] = correction_abrv[key]
#Add abbreviations the detector missed (curated manually)
with open("../references/Add_to_abbreviation_table.json", "r") as f:
    add_abrv = json.load(f)
for key in add_abrv.keys():
    abrv_dict[key] = add_abrv[key]
# Persist the corrected table; sets are serialized as lists via set_default.
with open("../references/abbreviation_table_processed.json","w") as f:
    json.dump(abrv_dict, f, indent = 4, default=set_default)
# Spot-check a known abbreviation's expansion.
abrv_dict["PTSD"]["LongForm"]
# Reload the processed table as a dataframe: one row per abbreviation term.
df_abrv = (pd.read_json("../references/abbreviation_table_processed.json")
           .T
           .reset_index()
           .rename(columns={"index":"Term"}))
df_abrv.head()
df_abrv[df_abrv["Term"] == "PTSD"].head()
#Validation
# Presumably "NYH" was removed as a misidentification, so expect an
# empty result here — confirm against Incorrect_abbrev.json.
df_abrv[df_abrv["Term"] == "NYH"]
```
## Clean Data
1. Lowercase
2. Remove Punctuation
3. White Spaces
```
# Rebuild each document as a cleaned string:
# - drop stopwords and punctuation
# - replace detected abbreviations with their curated long form
# - lemmatize everything else
documents_tokens = []
for index,doc in enumerate(docs):
    document_tokens = []
    for token in doc:
        #removes stopwords and punct
        if not token.is_stop and not token.is_punct:
            if str(token) in abrv_dict.keys():
                document_tokens.append(abrv_dict[str(token)]["LongForm"])#Replace short-form with long-form
            else:
                document_tokens.append(token.lemma_)
    documents_tokens.append(" ".join(document_tokens))
df_metadata["Stopwords_Lemma_Longform_Clean_Content"] = documents_tokens
df_metadata.head()
def unwanted_tokens(text):
    """Strip boilerplate n-grams (figure/table/download artifacts, "et al")
    left over from scraped article HTML.

    Replacement order matters: longer n-grams are removed before their
    shorter substrings so partial fragments are not left behind.
    Removal leaves the surrounding whitespace untouched.
    """
    # Renamed from the original misspelled local `docuemnt`.
    document = text
    remove_ngrams = ["large image page new","image page new window", "page new window Download","image page new",
                     "page new window","new window Download","image page","large image","1TABLES figurestablefigure thumbnailtable",
                     "FIGUREStable","DOWNLOAD","Download","et al"]
    for ngram in remove_ngrams:
        document = document.replace(ngram,"")
    return document
# Final cleaned text column used by downstream notebooks.
df_metadata["Clean_Content"] = (df_metadata["Stopwords_Lemma_Longform_Clean_Content"].apply(lambda text: unwanted_tokens(text)))
df_metadata.head()
#Add to clean function: Different than a dash ord(8207) compared to 45 for normal dash
# NOTE(review): ord("—") (em dash) is 8212, not 8207 (8207 is the RTL mark) —
# verify which character was meant before adding it to the cleaning step.
print(ord("-"),ord("—"))
from yellowbrick.text import DispersionPlot
import sklearn.metrics
# Check whether "et al" survives cleaning; DispersionPlot raises when none
# of the target words occur, hence the try/except.
try:
    #Troubleshooting tokens to remove
    dispersion_text = [doc.split() for doc in df_metadata["Clean_Content"]]
    other_words = [token.split() for token in ['et al']]
    other_words_1D = np.unique(np.concatenate(other_words).reshape(-1))
    target_words = other_words_1D
    #Create the visualizer and draw the plot
    visualizer = DispersionPlot(target_words,ignore_case=False)
    _ = visualizer.fit(dispersion_text)
except:
    print("No words found")
# Label each article by publication era relative to COVID-19 (2020 cutoff).
df_metadata["Classification"] = (df_metadata["Date Published"].apply(lambda pub_date: "Covid"
                                                                     if pub_date >= 2020 else "Pre-Covid" ))
# Final processed dataset consumed by downstream EDA/modeling notebooks.
df_metadata.to_csv("../Data/processed/Telehealth.csv",index=False)
```
# Trouble Shooting
## Dispersion Plot
```
from yellowbrick.text import DispersionPlot
import sklearn.metrics
# Dispersion-plot troubleshooting for additional candidate junk tokens;
# DispersionPlot raises when none of the target words occur.
try:
    #Troubleshooting tokens to remove
    dispersion_text = [doc.split() for doc in df_metadata["Clean_Content"]]
    other_words = [token.split() for token in ['kbinformation','binformation']]
    other_words_1D = np.unique(np.concatenate(other_words).reshape(-1))
    target_words = other_words_1D
    #Create the visualizer and draw the plot
    visualizer = DispersionPlot(target_words,ignore_case=False)
    _ = visualizer.fit(dispersion_text)
except:
    print("No Words to be found")
```
| github_jupyter |
# SageMaker Inference Pipeline with Scikit Learn and Linear Learner
ISO20022 pacs.008 inference pipeline notebook. This notebook uses training dataset to perform model training. It uses SageMaker Linear Learner to train a model. The problem is defined to be a `binary classification` problem of accepting or rejecting a pacs.008 message.
Amazon SageMaker provides a very rich set of [builtin algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algorithms-choose.html) for model training and development. This notebook uses [Amazon SageMaker Linear Learner Algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html) on training dataset to perform model training. The Amazon SageMaker linear learner algorithm provides a solution for both classification and regression problems. With the SageMaker algorithm, you can simultaneously explore different training objectives and choose the best solution from a validation set. You can also explore a large number of models and choose the best. The best model optimizes either of the following:
* Continuous objectives, such as mean square error, cross entropy loss, absolute error (regression models).
* Discrete objectives suited for classification, such as F1 measure, precision, recall, or accuracy (classification models).
ML Model development is an iterative process with several tasks that data scientists go through to produce an effective model that can solve business problem. The process typically involves:
* Data exploration and analysis
* Feature engineering
* Model development
* Model training and tuning
* Model deployment
We provide the accompanying notebook [pacs008_xgboost_local.ipynb](./pacs008_xgboost_local.ipynb) which demonstrates data exploration, analysis and feature engineering, focussing on text feature engineering. This notebook uses the results of analysis in [pacs008_xgboost_local.ipynb](./pacs008_xgboost_local.ipynb) to create a feature engineering pipeline using [SageMaker Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html).
Here we define the ML problem to be a `binary classification` problem, that of predicting whether a pacs.008 XML message will be processed successfully or lead to exception processing. The model predicts `Success` i.e. 1 or `Failure` i.e. 0.
**Feature Engineering**
Data pre-processing and featurizing the dataset by incorporating standard techniques or prior knowledge is a standard mechanism to make the dataset meaningful for training. Once data has been pre-processed and transformed, it can be finally used to train an ML model using an algorithm. However, when the trained model is used for processing real time or batch prediction requests, the model receives data in a format which needs to be pre-processed (e.g. featurized) before it can be passed to the algorithm. In this notebook, we will demonstrate how you can build your ML Pipeline leveraging the Sagemaker Scikit-learn container and SageMaker Linear Learner algorithm. After a model is trained, we deploy the Pipeline (Data preprocessing and Linear Learner) as an **Inference Pipeline** behind a **single Endpoint** for real time inference and for **batch inferences** using Amazon SageMaker Batch Transform.
We use the pacs.008 XML element `<InstrForNxtAgt><InstrInf>TEXT</InstrInf></InstrForNxtAgt>` to perform feature engineering, i.e. featurize the text into new numeric features that can be used in making predictions.
Since we featurize `InstrForNxtAgt` into numeric representations during training, we have to pre-process inference requests to transform the text into numeric features before using the trained model to make predictions.
**Inference Pipeline**
The diagram below shows how Amazon SageMaker Inference Pipeline works. It is used to deploy multi-container endpoints.

**Inference Endpoint**
The diagram below shows the places in the cross-border payment message flow where a call to ML inference endpoint can be injected to get inference from the ML model. The inference result can be used to take additional actions, including corrective actions before sending the message downstream.

**Further Reading:**
For information on Amazon SageMaker Linear Learner algorithm and SageMaker Inference Pipeline visit the following references:
[SageMaker Linear Learner Algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html)
[SageMaker Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html)
## Basic Setup
In this step we do basic setup needed for rest of the notebook:
* Amazon SageMaker API client using boto3
* Amazon SageMaker session object
* AWS region
* AWS IAM role
```
import os
import boto3
import sagemaker

from sagemaker import get_execution_role

# SageMaker API client, SDK session, AWS region, and the notebook's IAM
# execution role — used by every job/deployment cell below.
sm_client = boto3.Session().client('sagemaker')
sm_session = sagemaker.Session()
region = boto3.session.Session().region_name
role = get_execution_role()
print ("Notebook is running with assumed role {}".format (role))
print("Working with AWS services in the {} region".format(region))
```
### Provide S3 Bucket Name
```
# Working directory for the notebook
WORKDIR = os.getcwd()
BASENAME = os.path.dirname(WORKDIR)
print(f"WORKDIR: {WORKDIR}")
print(f"BASENAME: {BASENAME}")
# Create a directory storing local data
iso20022_data_path = 'iso20022-data'
if not os.path.exists(iso20022_data_path):
    # Create a new directory because it does not exist
    os.makedirs(iso20022_data_path)
# Store all prototype assets in this bucket
s3_bucket_name = 'iso20022-prototype-t3'
s3_bucket_uri = 's3://' + s3_bucket_name
# Prefix for all files in this prototype
prefix = 'iso20022'
pacs008_prefix = prefix + '/pacs008'
# S3 key prefixes for each artifact produced/consumed by this notebook.
raw_data_prefix = pacs008_prefix + '/raw-data'
labeled_data_prefix = pacs008_prefix + '/labeled-data'
training_data_prefix = pacs008_prefix + '/training-data'
training_headers_prefix = pacs008_prefix + '/training-headers'
test_data_prefix = pacs008_prefix + '/test-data'
training_job_output_prefix = pacs008_prefix + '/training-output'
print(f"Training data with headers will be uploaded to {s3_bucket_uri + '/' + training_headers_prefix}")
print(f"Training data will be uploaded to {s3_bucket_uri + '/' + training_data_prefix}")
print(f"Test data will be uploaded to {s3_bucket_uri + '/' + test_data_prefix}")
print(f"Training job output will be stored in {s3_bucket_uri + '/' + training_job_output_prefix}")
# Full S3 URIs referenced by later cells.
labeled_data_location = s3_bucket_uri + '/' + labeled_data_prefix
training_data_w_headers_location = s3_bucket_uri + '/' + training_headers_prefix
training_data_location = s3_bucket_uri + '/' + training_data_prefix
test_data_location = s3_bucket_uri + '/' + test_data_prefix
print(f"Raw labeled data location = {labeled_data_location}")
print(f"Training data with headers location = {training_data_w_headers_location}")
print(f"Training data location = {training_data_location}")
print(f"Test data location = {test_data_location}")
```
## Prepare Training Dataset
1. Select training dataset from raw labeled dataset.
1. Split labeled dataset to training and test datasets.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string

from sklearn.model_selection import train_test_split
from sklearn import ensemble, metrics, model_selection, naive_bayes

# Default seaborn palette for plots; render figures inline in the notebook.
color = sns.color_palette()
%matplotlib inline
```
### Download raw labeled dataset
```
# Download labeled raw dataset from S3
s3_client = boto3.client('s3')
s3_client.download_file(s3_bucket_name, labeled_data_prefix + '/labeled_data.csv', 'iso20022-data/labeled_data.csv')
# Read the labeled dataset and inspect the top few rows.
labeled_raw_df = pd.read_csv("iso20022-data/labeled_data.csv")
labeled_raw_df.head()
```
### Select features for training
```
# Training features: the target label plus the pacs.008 XML elements used
# as model inputs (country codes, regulatory-reporting fields, free text).
fts=[
    'y_target',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Dbtr_PstlAdr_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Cdtr_PstlAdr_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_DbtCdtRptgInd',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Authrty_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Dtls_Cd',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_InstrForNxtAgt_InstrInf',
]
# New data frame with selected features
selected_df = labeled_raw_df[fts]
selected_df.head()
# Rename columns: drop the common XML path prefix for readability.
selected_df = selected_df.rename(columns={
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Dbtr_PstlAdr_Ctry': 'Dbtr_PstlAdr_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_Cdtr_PstlAdr_Ctry': 'Cdtr_PstlAdr_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_DbtCdtRptgInd': 'RgltryRptg_DbtCdtRptgInd',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Authrty_Ctry': 'RgltryRptg_Authrty_Ctry',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_RgltryRptg_Dtls_Cd': 'RgltryRptg_Dtls_Cd',
    'Document_FIToFICstmrCdtTrf_CdtTrfTxInf_InstrForNxtAgt_InstrInf': 'InstrForNxtAgt',
})
selected_df.head()
from sklearn.preprocessing import LabelEncoder

# Assign Pandas data types.
categorical_fts=[
    'Dbtr_PstlAdr_Ctry',
    'Cdtr_PstlAdr_Ctry',
    'RgltryRptg_DbtCdtRptgInd',
    'RgltryRptg_Authrty_Ctry',
    'RgltryRptg_Dtls_Cd'
]
# No integer or numeric features in this dataset; empty lists are kept so
# the conversion loops below stay uniform.
integer_fts=[
]
numeric_fts=[
]
text_fts=[
    # Leave text as object
    # 'InstrForNxtAgt'
]
# Categorical features to categorical data type.
for col in categorical_fts:
    selected_df[col] = selected_df[col].astype(str).astype('category')
# Integer features to int64 data type.
for col in integer_fts:
    selected_df[col] = selected_df[col].astype(str).astype('int64')
# Numeric features to float64 data type.
for col in numeric_fts:
    selected_df[col] = selected_df[col].astype(str).astype('float64')
# Text features to string data type.
for col in text_fts:
    selected_df[col] = selected_df[col].astype(str).astype('string')
# Encode the y_target labels to integers (binary classification target).
label_encoder = LabelEncoder()
selected_df['y_target'] = label_encoder.fit_transform(selected_df['y_target'])
selected_df.dtypes
selected_df.info()
selected_df
# 80/20 train/test split. NOTE(review): X retains the y_target column —
# presumably the downstream Scikit-learn featurizer separates the label
# from the features; confirm against pacs008_sklearn_featurizer.py.
X_train_df, X_test_df, y_train_df, y_test_df = train_test_split(selected_df, selected_df['y_target'], test_size=0.20, random_state=299, shuffle=True)
print("Number of rows in train dataset : ",X_train_df.shape[0])
print("Number of rows in test dataset : ",X_test_df.shape[0])
X_train_df
X_test_df
## Save training and test datasets to CSV
# With headers (for the featurizer) and without (for Linear Learner/transform).
train_data_w_headers_output_path = 'iso20022-data/train_data_w_headers.csv'
print(f'Saving training data with headers to {train_data_w_headers_output_path}')
X_train_df.to_csv(train_data_w_headers_output_path, index=False)
train_data_output_path = 'iso20022-data/train_data.csv'
print(f'Saving training data without headers to {train_data_output_path}')
X_train_df.to_csv(train_data_output_path, header=False, index=False)
test_data_output_path = 'iso20022-data/test_data.csv'
print(f'Saving test data without headers to {test_data_output_path}')
X_test_df.to_csv(test_data_output_path, header=False, index=False)
```
### Upload training and test datasets to S3 for training
```
# Upload the three CSV artifacts to S3 for the SageMaker jobs below.
# NOTE(review): both training uploads assign to the same variable, so
# `train_input_data_location` ends up pointing at the HEADERLESS file —
# verify that is what the featurizer's fit() cell expects.
train_input_data_location = sm_session.upload_data(
    path=train_data_w_headers_output_path,
    bucket=s3_bucket_name,
    key_prefix=training_headers_prefix,
)
# Fixed message typo ("traing" -> "training").
print(f'Uploaded training data with headers to: {train_input_data_location}')
train_input_data_location = sm_session.upload_data(
    path=train_data_output_path,
    bucket=s3_bucket_name,
    key_prefix=training_data_prefix,
)
print(f'Uploaded training data without headers to: {train_input_data_location}')
test_input_data_location = sm_session.upload_data(
    path=test_data_output_path,
    bucket=s3_bucket_name,
    key_prefix=test_data_prefix,
)
print(f'Uploaded test data without headers to: {test_input_data_location}')
```
# Feature Engineering
## Create a Scikit-learn script to train with <a class="anchor" id="create_sklearn_script"></a>
To run Scikit-learn on Sagemaker `SKLearn` Estimator with a script as an entry point. The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:
* SM_MODEL_DIR: A string representing the path to the directory to write model artifacts to. These artifacts are uploaded to S3 for model hosting.
* SM_OUTPUT_DIR: A string representing the filesystem path to write output artifacts to. Output artifacts may include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed and uploaded to S3 to the same S3 prefix as the model artifacts.
Supposing two input channels, 'train' and 'test', were used in the call to the Chainer estimator's fit() method, the following will be set, following the format SM_CHANNEL_[channel_name]:
* SM_CHANNEL_TRAIN: A string representing the path to the directory containing data in the 'train' channel
* SM_CHANNEL_TEST: Same as above, but for the 'test' channel.
A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to model_dir so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an argparse.ArgumentParser instance.
### Create SageMaker Scikit Estimator <a class="anchor" id="create_sklearn_estimator"></a>
To run our Scikit-learn training script on SageMaker, we construct a `sagemaker.sklearn.estimator.sklearn` estimator, which accepts several constructor arguments:
* __entry_point__: The path to the Python script SageMaker runs for training and prediction.
* __role__: Role ARN
* __framework_version__: Scikit-learn version you want to use for executing your model training code.
* __train_instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* __sagemaker_session__ *(optional)*: The session used to train on Sagemaker.
```
from sagemaker.sklearn.estimator import SKLearn

preprocessing_job_name = 'pacs008-preprocessor-ll'
print('data preprocessing job name: ' + preprocessing_job_name)
FRAMEWORK_VERSION = "0.23-1"
# Featurizer script executed inside the SageMaker Scikit-learn container;
# it learns the preprocessing transforms from the training data.
source_dir = "../sklearn-transformers"
script_file = "pacs008_sklearn_featurizer.py"
sklearn_preprocessor = SKLearn(
    entry_point=script_file,
    source_dir=source_dir,
    role=role,
    framework_version=FRAMEWORK_VERSION,
    instance_type="ml.c4.xlarge",
    sagemaker_session=sm_session,
    base_job_name=preprocessing_job_name,
)
# Fit the preprocessing model on the uploaded training data.
sklearn_preprocessor.fit({"train": train_input_data_location})
```
### Batch transform our training data <a class="anchor" id="preprocess_train_data"></a>
Now that our preprocessor is properly fitted, let's go ahead and preprocess our training data. We use batch transform to preprocess the raw data and store the result directly back into S3.
```
# Define a SKLearn Transformer from the trained SKLearn Estimator
transformer = sklearn_preprocessor.transformer(
    instance_count=1,
    instance_type="ml.m5.xlarge",
    assemble_with="Line",
    accept="text/csv",
)
# Preprocess training input
transformer.transform(train_input_data_location, content_type="text/csv")
print("Waiting for transform job: " + transformer.latest_transform_job.job_name)
transformer.wait()
# S3 location of the featurized training data; fed to Linear Learner below.
preprocessed_train = transformer.output_path
```
# Train a Linear Learner Model
## Fit a LinearLearner Model with the preprocessed data <a class="anchor" id="training_model"></a>
Let's take the preprocessed training data and fit a LinearLearner Model. Sagemaker provides prebuilt algorithm containers that can be used with the Python SDK. The previous Scikit-learn job preprocessed the labeled raw pacs.008 dataset into useable training data that we can now use to fit a binary classifier Linear Learner model.
For more on Linear Learner see: https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html
```
from sagemaker.image_uris import retrieve

# Prebuilt Linear Learner container image URI for the current region.
ll_image = retrieve("linear-learner", boto3.Session().region_name)
# Set job name
training_job_name = 'pacs008-ll-training'
print('Linear Learner training job name: ' + training_job_name)
# S3 bucket for storing model artifacts
training_job_output_location = s3_bucket_uri + '/' + training_job_output_prefix + '/ll_model'
ll_estimator = sagemaker.estimator.Estimator(
    ll_image,
    role,
    instance_count=1,
    instance_type="ml.m4.2xlarge",
    volume_size=20,
    max_run=3600,
    input_mode="File",
    output_path=training_job_output_location,
    sagemaker_session=sm_session,
    base_job_name=training_job_name,
)
# binary_classifier_model_selection_criteria: accuracy is default
# - accuracy | f_beta | precision_at_target_recall |recall_at_target_precision | loss_function
# feature_dim=auto, # auto or actual number, default is auto
# epochs=15, default is 15
# learning_rate=auto or actual number 0.05 or 0.005
# loss=logistic | auto |hinge_loss, default is logistic
# mini_batch_size=32, default is 1000
# num_models=auto, or a number
# optimizer=auto or sgd | adam | rmsprop
ll_estimator.set_hyperparameters(
    predictor_type="binary_classifier",
    binary_classifier_model_selection_criteria="accuracy",
    epochs=15,
    mini_batch_size=32)
# Train on the featurized CSV produced by the batch-transform step.
ll_train_data = sagemaker.inputs.TrainingInput(
    preprocessed_train, # set after preprocessing job completes
    distribution="FullyReplicated",
    content_type="text/csv",
    s3_data_type="S3Prefix",
)
data_channels = {"train": ll_train_data}
ll_estimator.fit(inputs=data_channels, logs=True)
```
# Serial Inference Pipeline with Scikit preprocessor and Linear Learner <a class="anchor" id="serial_inference"></a>
## Set up the inference pipeline <a class="anchor" id="pipeline_setup"></a>
Setting up a Machine Learning pipeline can be done with the Pipeline Model. This sets up a list of models in a single endpoint. We configure our pipeline model with the fitted Scikit-learn inference model (data preprocessing/feature engineering model) and the fitted Linear Learner model. Deploying the model follows the standard ```deploy``` pattern in the SageMaker Python SDK.
```
from sagemaker.model import Model
from sagemaker.pipeline import PipelineModel
import boto3
from time import gmtime, strftime

# Timestamp suffix keeps model/endpoint names unique across runs.
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# The two SageMaker Models: one for data preprocessing, and second for inference.
# (Renamed from the original misspelled `scikit_learn_inferencee_model`;
# the variable is only used within this cell.)
scikit_learn_inference_model = sklearn_preprocessor.create_model()
linear_learner_model = ll_estimator.create_model()
model_name = "pacs008-ll-inference-pipeline-" + timestamp_prefix
endpoint_name = "pacs008-ll-inference-pipeline-ep-" + timestamp_prefix
# Chain the containers: raw CSV -> featurizer -> Linear Learner prediction.
sm_model = PipelineModel(
    name=model_name, role=role, models=[scikit_learn_inference_model, linear_learner_model]
)
sm_model.deploy(initial_instance_count=1, instance_type="ml.c4.xlarge", endpoint_name=endpoint_name)
```
### Store Model Name and Endpoint Name in Notebook Magic Store
These notebook magic store values are used in the example batch transform notebook.
```
%store model_name
%store endpoint_name
```
## Make a request to our pipeline endpoint <a class="anchor" id="pipeline_inference_request"></a>
The diagram below shows the places in the cross-border payment message flow where a call to ML inference endpoint can be injected to get inference from the ML model. The inference result can be used to take additional actions, including corrective actions before sending the message downstream.

Here we just grab the first line from the test data (you'll notice that the inference python script is very particular about the ordering of the inference request data). The ```ContentType``` field configures the first container, while the ```Accept``` field configures the last container. You can also specify each container's ```Accept``` and ```ContentType``` values using environment variables.
We make our request with the payload in ```'text/csv'``` format, since that is what our script currently supports. If other formats need to be supported, this would have to be added to the ```output_fn()``` method in our entry point. Note that we set the ```Accept``` to ```application/json```, since Linear Learner does not support ```text/csv``` ```Accept```. The inference output in this case is trying to predict `Success` or `Failure` of ISO20022 pacs.008 payment message using only the subset of message XML elements in the message i.e. features on which model was trained.
```
from sagemaker.predictor import Predictor
from sagemaker.serializers import CSVSerializer

# Sample CSV payloads in the featurizer's expected column order:
# Dbtr_Ctry,Cdtr_Ctry,RgltryRptg_DbtCdtRptgInd,RgltryRptg_Authrty_Ctry,RgltryRptg_Dtls_Cd,InstrForNxtAgt
# payload_1, expect: Failure
#payload_1 = "US,GB,,,,/SVC/It is to be delivered in three days. Greater than three days penalty add 2bp per day"
payload_1 = "MX,GB,,,,/SVC/It is to be delivered in four days. Greater than four days penalty add 2bp per day"
# payload_2, expect: Success
payload_2 = "MX,GB,,,,"
#payload_2 = "US,IE,,,,/TRSY/Treasury Services Platinum Customer"
# payload_3, expect: Failure
payload_3 = "TH,US,,,,/SVC/It is to be delivered in four days. Greater than four days penalty add 2bp per day"
#payload_3 = "CA,US,,,,/SVC/It is to be delivered in three days. Greater than three days penalty add 2bp per day"
# payload_4, expect: Success
payload_4 = "IN,CA,DEBT,IN,00.P0006,"
# NOTE(review): payload_5 is assigned three times below; only the last
# value ("/REG/15.X0004 retail", a Failure case) is actually sent, which
# contradicts the "Expect Success" message printed for it.
# payload_5, expect: Success
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0003 FDI in Transportation"
# Failure
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0009 FDI in Agriculture "
# Failure
payload_5 = "IE,IN,CRED,IN,0,/REG/15.X0004 retail"
# payload_6, expect: Failure
payload_6 = "IE,IN,CRED,IN,0,/REG/99.C34698"
#payload_6 = "MX,IE,,,,/TRSY/eweweww"
# NOTE(review): this hardcoded endpoint name overrides the endpoint deployed
# earlier in this notebook — remove or update before re-running end to end.
endpoint_name = 'pacs008-ll-inference-pipeline-ep-2021-11-25-00-58-52'
predictor = Predictor(
    endpoint_name=endpoint_name, sagemaker_session=sm_session, serializer=CSVSerializer()
)
print(f"1. Expect Failure i.e. 0, {predictor.predict(payload_1)}")
print(f"2. Expect Success i.e. 1, {predictor.predict(payload_2)}")
print(f"3. Expect Failure i.e. 0, {predictor.predict(payload_3)}")
print(f"4. Expect Success i.e. 1, {predictor.predict(payload_4)}")
print(f"5. Expect Success i.e. 1, {predictor.predict(payload_5)}")
print(f"6. Expect Failure i.e. 0, {predictor.predict(payload_6)}")
```
# Delete Endpoint
Once we are finished with the endpoint, we clean up the resources!
```
# Tear down the hosted endpoint so it stops incurring charges.
sm_client = sm_session.boto_session.client("sagemaker")
sm_client.delete_endpoint(EndpointName=endpoint_name)
```
| github_jupyter |
```
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex client library: Custom training tabular regression model with pipeline for online prediction with training pipeline
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom tabular regression model for online prediction, using a training pipeline.
### Dataset
The dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD.
### Objective
In this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using `gcloud` command-line tool or online using Google Cloud Console.
The steps performed include:
- Create a Vertex custom job for training a model.
- Create a `TrainingPipeline` resource.
- Train a TensorFlow model with the `TrainingPipeline` resource.
- Retrieve and load the model artifacts.
- View the model evaluation.
- Upload the model as a Vertex `Model` resource.
- Deploy the `Model` resource to a serving `Endpoint` resource.
- Make a prediction.
- Undeploy the `Model` resource.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex client library.
```
# Detect whether this notebook runs on a managed Google Cloud (Vertex AI
# Workbench) VM: the metadata file below only exists on those images.
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
# Managed notebooks run as a non-root user, so install into the user site.
USER_FLAG = "--user"
else:
USER_FLAG = ""
# '!' runs a shell command; $USER_FLAG interpolates the Python variable above.
! pip3 install -U google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
```
# Restart the kernel so the freshly installed packages become importable.
# Skipped under automated testing (IS_TESTING), where a restart would kill the run.
if not os.getenv("IS_TESTING"):
    import IPython

    shell = IPython.Application.instance()
    shell.kernel.do_shutdown(True)  # True = restart the kernel after shutdown
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# Fall back to the gcloud default project when no project ID was entered above.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud ('!cmd' captures stdout as a list of lines)
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# Make all subsequent gcloud/gsutil commands use this project.
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)
```
REGION = "us-central1" # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
```
from datetime import datetime

# Timestamp suffix (YYYYMMDDHHMMSS) appended to resource names so that
# concurrent users of a shared project do not collide.
TIMESTAMP = f"{datetime.now():%Y%m%d%H%M%S}"
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
# %env sets the environment variable for this kernel; point it at your key file.
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a custom training job using the Vertex client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an `Endpoint` resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}

# Default to a project-scoped, timestamped bucket name when none was supplied.
# FIX: the original concatenated PROJECT_ID and "aip-" with no separator,
# producing names like "gs://myprojectaip-20210101..."; add a "-" between them.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
#### Import Vertex client library
Import the Vertex client library into our Python environment.
```
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
```
#### Vertex constants
Setup up the following constants for Vertex:
- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.
- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
```
# API service endpoint: requests must target the regional endpoint that
# matches the location of the resources they operate on.
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"

# Vertex location root path for your dataset, model and endpoint resources.
PARENT = f"projects/{PROJECT_ID}/locations/{REGION}"
```
#### CustomJob constants
Set constants unique to CustomJob training:
- Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for.
```
# Schema describing a generic custom-training task; it tells the Pipeline
# service how to interpret the training_task_inputs supplied later.
CUSTOM_TASK_GCS_PATH = (
    "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
    "custom_task_1.0.0.yaml"
)
```
#### Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
For GPU, available accelerators include:
- aip.AcceleratorType.NVIDIA_TESLA_K80
- aip.AcceleratorType.NVIDIA_TESLA_P100
- aip.AcceleratorType.NVIDIA_TESLA_P4
- aip.AcceleratorType.NVIDIA_TESLA_T4
- aip.AcceleratorType.NVIDIA_TESLA_V100
Otherwise specify `(None, None)` to use a container image to run on a CPU.
*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
```
# Training accelerators: honor the IS_TESTING_TRAIN_GPU override, otherwise
# default to a single NVIDIA Tesla K80.
if os.getenv("IS_TESTING_TRAIN_GPU"):
    TRAIN_GPU, TRAIN_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_TRAIN_GPU")),
    )
else:
    TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)

# Deployment accelerators: default to CPU-only (None, None).
# FIX: the original only read the misspelled env var "IS_TESTING_DEPOLY_GPU"
# (compare IS_TESTING_DEPLOY_MACHINE elsewhere in this notebook). Accept the
# correct spelling first, keeping the misspelled one for backward compatibility.
_deploy_gpu_count = os.getenv("IS_TESTING_DEPLOY_GPU") or os.getenv("IS_TESTING_DEPOLY_GPU")
if _deploy_gpu_count:
    DEPLOY_GPU, DEPLOY_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(_deploy_gpu_count),
    )
else:
    DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Container (Docker) image
Next, we will set the Docker container images for training and prediction
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest`
- TensorFlow 2.4
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1`
- Scikit-learn
- `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest`
- Pytorch
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`
For the latest list, see [Pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers).
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest`
- Scikit-learn
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
```
# TensorFlow version for the pre-built containers (overridable for testing).
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-1"

# FIX (DRY): the original duplicated the identical TRAIN_VERSION logic in both
# branches of `if TF[0] == "2"`. Training images are named "tf-*" for all TF
# versions; only prediction images use the "tf2-" prefix for TF 2.x.
TRAIN_VERSION = "tf-gpu.{}".format(TF) if TRAIN_GPU else "tf-cpu.{}".format(TF)

_deploy_prefix = "tf2" if TF[0] == "2" else "tf"
if DEPLOY_GPU:
    DEPLOY_VERSION = "{}-gpu.{}".format(_deploy_prefix, TF)
else:
    DEPLOY_VERSION = "{}-cpu.{}".format(_deploy_prefix, TF)

TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)

print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
```
#### Machine Type
Next, set the machine type to use for training and prediction.
- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
```
# Compute shapes for the training and serving VMs. The IS_TESTING_* env vars
# let an automated harness override the machine family; 4 vCPUs either way.
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") or "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)

MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") or "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
```
# Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
## Set up clients
The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
- Model Service for `Model` resources.
- Pipeline Service for training.
- Endpoint Service for deployment.
- Job Service for batch jobs and custom training.
- Prediction Service for serving.
```
# All service clients share the same regional endpoint.
client_options = {"api_endpoint": API_ENDPOINT}


def create_model_client():
    """Client for managing Model resources."""
    return aip.ModelServiceClient(client_options=client_options)


def create_pipeline_client():
    """Client for creating and monitoring training pipelines."""
    return aip.PipelineServiceClient(client_options=client_options)


def create_endpoint_client():
    """Client for managing serving Endpoint resources."""
    return aip.EndpointServiceClient(client_options=client_options)


def create_prediction_client():
    """Client for sending online prediction requests."""
    return aip.PredictionServiceClient(client_options=client_options)


clients = {
    "model": create_model_client(),
    "pipeline": create_pipeline_client(),
    "endpoint": create_endpoint_client(),
    "prediction": create_prediction_client(),
}

for entry in clients.items():
    print(entry)
```
## Train a model
There are two ways you can train a custom model using a container image:
- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model.
## Prepare your custom job specification
Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:
- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)
- `python_package_spec` : The specification of the Python package to be installed with the pre-built container.
### Prepare your machine specification
Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training.
- `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8.
- `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU.
- `accelerator_count`: The number of accelerators.
```
# Hardware spec for each training replica. accelerator_count must be 0 when
# no accelerator type is attached.
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
if TRAIN_GPU:
    machine_spec = {
        "machine_type": TRAIN_COMPUTE,
        "accelerator_type": TRAIN_GPU,
        "accelerator_count": TRAIN_NGPU,
    }
```
### Prepare your disk specification
(optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training.
- `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD.
- `boot_disk_size_gb`: Size of disk in GB.
```
# Boot disk for each training VM: pd-ssd is faster, pd-standard is cheaper.
DISK_TYPE = "pd-ssd"  # [ pd-ssd, pd-standard]
DISK_SIZE = 200  # GB

disk_spec = dict(boot_disk_type=DISK_TYPE, boot_disk_size_gb=DISK_SIZE)
```
### Define the worker pool specification
Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:
- `replica_count`: The number of instances to provision of this machine type.
- `machine_spec`: The hardware specification.
- `disk_spec` : (optional) The disk storage specification.
- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.
Let's dive deeper now into the python package specification:
-`executor_image_spec`: This is the docker image which is configured for your custom training job.
-`package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the docker image.
-`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task.py` -- note that it is not necessary to append the `.py` suffix.
-`args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting:
- `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts:
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
- indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
- `"--epochs=" + EPOCHS`: The number of epochs for training.
- `"--steps=" + STEPS`: The number of steps (batches) per epoch.
- `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training.
- `"single"`: single device.
- `"mirror"`: all GPU devices on a single compute instance.
- `"multi"`: all GPU devices on all compute instances.
- `"--param-file=" + PARAM_FILE`: The Cloud Storage location for storing feature normalization values.
```
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)

# Use a mirrored strategy only when more than one GPU is attached.
if not TRAIN_NGPU or TRAIN_NGPU < 2:
    TRAIN_STRATEGY = "single"
else:
    TRAIN_STRATEGY = "mirror"

EPOCHS = 20
STEPS = 100
PARAM_FILE = BUCKET_NAME + "/params.txt"

# DIRECT=True passes the model directory explicitly via --model-dir;
# otherwise the service provides it through the AIP_MODEL_DIR env var.
DIRECT = True

# FIX (DRY): build the four shared flags once instead of duplicating the whole
# list in both branches as the original did; prepend --model-dir when DIRECT.
CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--steps=" + str(STEPS),
    "--distribute=" + TRAIN_STRATEGY,
    "--param-file=" + PARAM_FILE,
]
if DIRECT:
    CMDARGS = ["--model-dir=" + MODEL_DIR] + CMDARGS

# One worker pool: a single replica running the Python training package
# inside the prebuilt training container.
worker_pool_spec = [
    {
        "replica_count": 1,
        "machine_spec": machine_spec,
        "disk_spec": disk_spec,
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"],
            "python_module": "trainer.task",
            "args": CMDARGS,
        },
    }
]
```
### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).
#### Package Assembly
In the following cells, you will assemble the training package.
```
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
# Minimal setup.cfg: no build tags on the generated egg-info.
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
# setup.py installs tensorflow_datasets into the training container.
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder (an empty __init__.py makes 'trainer' a package).
! mkdir custom/trainer
! touch custom/trainer/__init__.py
```
#### Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads Boston Housing dataset from TF.Keras builtin datasets
- Builds a simple deep neural network model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs specified by `args.epochs`.
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
- Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file.
```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
```
#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
```
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
```
## Train the model using a `TrainingPipeline` resource
Now start training of your custom training job using a training pipeline on Vertex. To train your custom model, do the following steps:
1. Create a Vertex `TrainingPipeline` resource for the `Dataset` resource.
2. Execute the pipeline to start the training.
### Create a `TrainingPipeline` resource
You may ask, what do we use a pipeline for? We typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:
1. Being reusable for subsequent training jobs.
2. Can be containerized and run as a batch job.
3. Can be distributed.
4. All the steps are associated with the same pipeline job for tracking progress.
#### The `training_pipeline` specification
First, you need to describe a pipeline specification. Let's look into the *minimal* requirements for constructing a `training_pipeline` specification for a custom job:
- `display_name`: A human readable name for the pipeline job.
- `training_task_definition`: The training task schema.
- `training_task_inputs`: A dictionary describing the requirements for the training job.
- `model_to_upload`: A dictionary describing the specification for the (uploaded) Vertex custom `Model` resource.
- `display_name`: A human readable name for the `Model` resource.
- `artifact_uri`: The Cloud Storage path where the model artifacts are stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the custom model will serve predictions.
```
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value

MODEL_NAME = "custom_pipeline-" + TIMESTAMP
PIPELINE_DISPLAY_NAME = "custom-training-pipeline" + TIMESTAMP

# Wrap the worker pool spec in a protobuf Value, as required by the
# training_task_inputs field.
training_task_inputs = json_format.ParseDict(
    {"workerPoolSpecs": worker_pool_spec}, Value()
)

# How to register the resulting model: where the artifacts live and which
# serving container the Endpoint should run.
model_to_upload = {
    "display_name": PIPELINE_DISPLAY_NAME + "-model",
    "artifact_uri": MODEL_DIR,
    "container_spec": {"image_uri": DEPLOY_IMAGE},
}

# Minimal training pipeline specification for a custom task.
pipeline = {
    "display_name": PIPELINE_DISPLAY_NAME,
    "training_task_definition": CUSTOM_TASK_GCS_PATH,
    "training_task_inputs": training_task_inputs,
    "model_to_upload": model_to_upload,
}

print(pipeline)
```
#### Create the training pipeline
Use this helper function `create_pipeline`, which takes the following parameter:
- `training_pipeline`: the full specification for the pipeline training job.
The helper function calls the pipeline client service's `create_pipeline` method, which takes the following parameters:
- `parent`: The Vertex location root path for your `Dataset`, `Model` and `Endpoint` resources.
- `training_pipeline`: The full specification for the pipeline training job.
The helper function will return the Vertex fully qualified identifier assigned to the training pipeline, which is saved as `pipeline.name`.
```
def create_pipeline(training_pipeline):
    """Submit `training_pipeline` to the Pipeline service.

    Returns the created TrainingPipeline resource (and prints it), or
    None when the request fails.
    """
    try:
        result = clients["pipeline"].create_training_pipeline(
            parent=PARENT, training_pipeline=training_pipeline
        )
    except Exception as e:
        print("exception:", e)
        return None
    print(result)
    return result


response = create_pipeline(pipeline)
```
Now save the unique identifier of the training pipeline you created.
```
# Fully qualified resource name, e.g.
# projects/<p>/locations/<r>/trainingPipelines/<number>
pipeline_id = response.name
# The trailing numeric component of the resource name.
pipeline_short_id = pipeline_id.rsplit("/", 1)[-1]
print(pipeline_id)
```
### Get information on a training pipeline
Now get pipeline information for just this training pipeline instance. The helper function gets the pipeline information for just this pipeline by calling the pipeline client service's `get_training_pipeline` method, with the following parameter:
- `name`: The Vertex fully qualified pipeline identifier.
When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`.
```
def get_training_pipeline(name, silent=False):
    """Fetch a TrainingPipeline by its fully qualified resource name.

    When `silent` is False, also print its main fields.
    """
    result = clients["pipeline"].get_training_pipeline(name=name)
    if silent:
        return result
    print("pipeline")
    for label, value in (
        ("name", result.name),
        ("display_name", result.display_name),
        ("state", result.state),
        ("training_task_definition", result.training_task_definition),
        ("training_task_inputs", dict(result.training_task_inputs)),
        ("create_time", result.create_time),
        ("start_time", result.start_time),
        ("end_time", result.end_time),
        ("update_time", result.update_time),
        ("labels", dict(result.labels)),
    ):
        print(" {}:".format(label), value)
    return result


response = get_training_pipeline(pipeline_id)
```
# Deployment
Training the above model may take upwards of 20 minutes time.
Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`.
```
# Poll the pipeline every 60s until it succeeds; raise immediately if it fails.
while True:
    response = get_training_pipeline(pipeline_id, True)
    if response.state == aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
        model_to_deploy = response.model_to_upload
        model_to_deploy_id = model_to_deploy.name
        print("Training Time:", response.end_time - response.start_time)
        break
    print("Training job has not completed:", response.state)
    model_to_deploy_id = None
    if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
        raise Exception("Training Job Failed")
    time.sleep(60)

print("model to deploy:", model_to_deploy_id)

# With the indirect approach the service writes artifacts into a "model"
# subfolder of the directory it chose.
if not DIRECT:
    MODEL_DIR = MODEL_DIR + "/model"

model_path_to_deploy = MODEL_DIR
```
## Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
```
import tensorflow as tf
# Reload the trained SavedModel straight from the Cloud Storage path (MODEL_DIR).
model = tf.keras.models.load_model(MODEL_DIR)
```
## Evaluate the model
Now let's find out how good the model is.
### Load evaluation data
You will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home).
You don't need the training data, and hence why we loaded it as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescaling) the data in each column by dividing each value by the maximum value of that column. This will replace each single value with a 32-bit floating point number between 0 and 1.
```
import numpy as np
from tensorflow.keras.datasets import boston_housing

# Load only the test (holdout) split; the training split is discarded as (_, _).
(_, _), (x_test, y_test) = boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
    """Rescale a feature array into [0, 1] by dividing by its maximum value.

    Args:
        feature: Numeric numpy array (one column of the feature matrix).

    Returns:
        The rescaled array as 32-bit floats.
    """
    peak = np.max(feature)  # renamed from `max` to avoid shadowing the builtin
    return (feature / peak).astype(np.float32)
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()

# Normalize each of the 13 feature COLUMNS by its own maximum, as described
# above. The original code indexed x_test[_], which scales the first 13 ROWS
# (each by its row max) instead of the columns -- a bug.
for col in range(13):
    x_test[:, col] = scale(x_test[:, col])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
```
### Perform the model evaluation
Now evaluate how well the model in the custom job did.
```
# Score the reloaded model on the held-out Boston Housing test split.
model.evaluate(x_test, y_test)
```
## Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
### How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.
The serving function consists of two parts:
- `preprocessing function`:
- Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
- Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
- `post-processing function`:
- Converts the model output to format expected by the receiving application -- e.g., compresses the output.
- Packages the output for the receiving application -- e.g., add headings, make JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.
## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
```
# Reload the SavedModel and look up the name of the serving signature's
# single input tensor -- needed later to build prediction requests.
loaded = tf.saved_model.load(model_path_to_deploy)
signature = loaded.signatures["serving_default"]
serving_input = next(iter(signature.structured_input_signature[1]))
print("Serving function input:", serving_input)
```
### Upload the model
Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.
The helper function takes the following parameters:
- `display_name`: A human readable name for the `Endpoint` service.
- `image_uri`: The container image for the model deployment.
- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.
The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:
- `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources.
- `model`: The specification for the Vertex `Model` resource instance.
Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:
- `display_name`: A human readable name for the `Model` resource.
- `metadata_schema_uri`: Since your model was built without an Vertex `Dataset` resource, you will leave this blank (`''`).
- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.
The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id.
```
# Serving container image for the uploaded model (set earlier as DEPLOY_IMAGE).
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload a SavedModel to Vertex and return its fully qualified Model ID.

    Args:
        display_name: Human readable name for the Model resource.
        image_uri: Serving container image used at deployment time.
        model_uri: Cloud Storage path holding the SavedModel artifacts.

    Returns:
        The fully qualified Vertex Model resource identifier.
    """
    container_spec = {
        "image_uri": image_uri,
        "command": [],
        "args": [],
        "env": [{"name": "env_name", "value": "env_value"}],
        "ports": [{"container_port": 8080}],
        "predict_route": "",
        "health_route": "",
    }
    model_spec = {
        "display_name": display_name,
        "metadata_schema_uri": "",  # blank: no managed Vertex Dataset was used
        "artifact_uri": model_uri,
        "container_spec": container_spec,
    }
    response = clients["model"].upload_model(parent=PARENT, model=model_spec)
    print("Long running operation:", response.operation.name)
    # Block until the long-running upload completes.
    upload_model_response = response.result(timeout=180)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model
# Upload the trained model; keep its Vertex ID for the deployment and cleanup steps.
model_to_deploy_id = upload_model(
    "boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
```
### Get `Model` resource information
Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
```
def get_model(name):
    """Fetch and print a Vertex Model resource.

    Args:
        name: The Vertex fully qualified identifier for the Model resource.

    Returns:
        The Model resource. Returning it (instead of None) makes this helper
        consistent with the other helpers in this notebook, which all return
        their service response.
    """
    response = clients["model"].get_model(name=name)
    print(response)
    return response

get_model(model_to_deploy_id)
```
## Deploy the `Model` resource
Now deploy the trained Vertex custom `Model` resource. This requires two steps:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
### Create an `Endpoint` resource
Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`.
```
ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP

def create_endpoint(display_name):
    """Create a Vertex Endpoint resource and wait until it is provisioned.

    Args:
        display_name: Human readable name for the Endpoint resource.

    Returns:
        The created Endpoint resource (its .name field is the fully
        qualified identifier).
    """
    endpoint_spec = {"display_name": display_name}
    response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint_spec)
    print("Long running operation:", response.operation.name)
    # Block until the endpoint is ready to serve.
    result = response.result(timeout=300)
    print("result")
    for field in ("name", "display_name", "description", "labels",
                  "create_time", "update_time"):
        print(f" {field}:", getattr(result, field))
    return result

result = create_endpoint(ENDPOINT_NAME)
```
Now get the unique identifier for the `Endpoint` resource you created.
```
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint (last segment of the resource path)
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
```
### Compute instance scaling
You have several choices on scaling the compute instances for handling your online prediction requests:
- Single Instance: The online prediction requests are processed on a single compute instance.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.
- Auto Scaling: The online prediction requests are split across a scalable number of compute instances.
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
```
# Single-instance serving: pin both scaling bounds to one compute node.
MIN_NODES = 1
MAX_NODES = 1
```
### Deploy `Model` resource to the `Endpoint` resource
Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:
- `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.
- `deploy_model_display_name`: A human readable name for the deployed model.
- `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to.
The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:
- `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.
- `deployed_model`: The requirements specification for deploying the model.
- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.
- If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic.
- If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100.
Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:
- `model`: The Vertex fully qualified model identifier of the (upload) model to deploy.
- `display_name`: A human readable name for the deployed model.
- `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.
- `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests.
- `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
#### Traffic Split
Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.
Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only gets, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision.
#### Response
The method returns a long running operation `response`. We will wait synchronously for the operation to complete by calling the `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
```
DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP

def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy a Vertex Model resource to an Endpoint and wait for completion.

    Args:
        model: Fully qualified Vertex Model resource identifier.
        deployed_model_display_name: Human readable name for the deployment.
        endpoint: Fully qualified Vertex Endpoint resource identifier.
        traffic_split: Percent of endpoint traffic per model. Defaults to
            {"0": 100} (all traffic to this deployment). A None sentinel is
            used instead of a mutable dict default, which would be shared
            across calls.

    Returns:
        The deployment identifier assigned by the Endpoint service.
    """
    if traffic_split is None:
        traffic_split = {"0": 100}
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        # Keep container logging enabled while debugging the deployment.
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    # Block until the model is deployed (first deployment may take minutes).
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id

deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
```
## Make an online prediction request
Now do an online prediction to your deployed model.
### Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
```
# Grab a single held-out example (and its label) to use as the prediction input.
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
```
### Send the prediction request
Ok, now you have a test data item. Use this helper function `predict_data`, which takes the parameters:
- `data`: The test data item as a numpy 1D array of floating point values.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.
- `parameters_dict`: Additional parameters for serving.
This function uses the prediction client service and calls the `predict` method with the parameters:
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.
- `instances`: A list of instances (data items) to predict.
- `parameters`: Additional parameters for serving.
To pass the test data to the prediction service, you package it for transmission to the serving binary as follows:
1. Convert the data item from a 1D numpy array to a 1D Python list.
2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`)
Each instance in the prediction request is a dictionary entry of the form:
{input_name: content}
- `input_name`: the name of the input layer of the underlying model.
- `content`: The data item as a 1D Python list.
Since the `predict()` service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.
The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction:
- `predictions` -- the predicted median value of a house in units of 1K USD.
```
def predict_data(data, endpoint, parameters_dict):
    """Send one online prediction request and print the returned predictions.

    Args:
        data: Test item as a 1D numpy array of floats.
        endpoint: Fully qualified Vertex Endpoint resource identifier.
        parameters_dict: Extra serving parameters (may be None).
    """
    parameters = json_format.ParseDict(parameters_dict, Value())
    # The format of each instance should conform to the deployed model's prediction input schema.
    payload = {serving_input: data.tolist()}
    instances = [json_format.ParseDict(payload, Value())]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    print("predictions")
    for prediction in response.predictions:
        print(" prediction:", prediction)

predict_data(test_item, endpoint_id, None)
```
## Undeploy the `Model` resource
Now undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to.
This function calls the endpoint client service's method `undeploy_model`, with the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.
- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.
Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}.
```
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy a deployed model from its serving Endpoint.

    Args:
        deployed_model_id: Deployment identifier returned by deploy_model().
        endpoint: Fully qualified Vertex Endpoint resource identifier.
    """
    # traffic_split is {} because this is the only model on the endpoint.
    response = clients["endpoint"].undeploy_model(
        endpoint=endpoint,
        deployed_model_id=deployed_model_id,
        traffic_split={},
    )
    print(response)

undeploy_model(deployed_model_id, endpoint_id)
```
# Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
# Flags controlling which resources the cleanup below removes.
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Each delete is wrapped in try/except so a missing resource (or an ID that
# was never created in this run) does not abort the rest of the cleanup.
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
    if delete_dataset and "dataset_id" in globals():
        clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
    print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
    if delete_pipeline and "pipeline_id" in globals():
        clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
    print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
    if delete_model and "model_to_deploy_id" in globals():
        clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
    print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
    if delete_endpoint and "endpoint_id" in globals():
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
    if delete_batchjob and "batch_job_id" in globals():
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
    if delete_customjob and "job_id" in globals():
        clients["job"].delete_custom_job(name=job_id)
except Exception as e:
    print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
    if delete_hptjob and "hpt_job_id" in globals():
        clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
    print(e)
# Finally remove the staging bucket itself (IPython shell magic; notebook-only syntax).
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
### Dependences
```
import sys
sys.path.append("../")
import math
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
from IPython.display import clear_output
from lib.models.LinkNet import LinkNet
import lib.utils as utils
import IPython.display as ipd
```
### Loading experiment data
```
# set experiment ID
EXP_ID = "LinkNet"
# Create the on-disk folder layout for this experiment and restore any saved state.
utils.create_experiment_folders(EXP_ID)
utils.load_experiment_data()
```
### Model instantiation
```
# Build the LinkNet model for 128x128 single-channel inputs (batch dim unspecified).
model = LinkNet()
model.build((None,128,128,1))
print(model.summary())
```
### Loading Dataset
```
# Memory-map the training and validation spectrogram arrays.
# mmap_mode='c' is numpy's copy-on-write mode: data stays on disk until touched.
train_x = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_train.npy", mmap_mode='c')
train_y = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_train.npy", mmap_mode='c')
train_shape = train_x.shape
print("Loaded", train_shape, "samples")

valid_x_1 = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_val.npy", mmap_mode='c')
valid_y_1 = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_val.npy", mmap_mode='c')
valid_shape = valid_x_1.shape
print("Loaded", valid_shape, "samples")
```
### Dataset Normalization and Batches split
```
# Load precomputed normalization constants, in order: [shift_x, shift_y, scale_x, scale_y].
value = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/scale_and_shift.npy", mmap_mode='c')
print(value)
SHIFT_VALUE_X, SHIFT_VALUE_Y, SCALE_VALUE_X, SCALE_VALUE_Y = value[0], value[1], value[2], value[3]
# SHIFT_VALUE_X, SHIFT_VALUE_Y, SCALE_VALUE_X, SCALE_VALUE_Y = utils.get_shift_scale_maxmin(train_x, train_y, valid_x_1, valid_y_1)
mini_batch_size = 58
# Number of full mini-batches per pass; any remainder samples are dropped.
num_train_minibatches = math.floor(train_x.shape[0]/mini_batch_size)
num_val_minibatches = math.floor(valid_x_1.shape[0]/mini_batch_size)
print("train_batches:", num_train_minibatches, "valid_batches:", num_val_minibatches)
```
### Metrics
```
# default tf.keras metrics
# Running mean of per-batch training loss; reset after each validation step below.
train_loss = tf.keras.metrics.Mean(name='train_loss')
```
### Set Loss and load model weights
```
# MSE reconstruction loss, optimized with Adam (lr=0.001).
loss_object = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# get last saved epoch index and best result in validation step
CURRENT_EPOCH, BEST_VALIDATION = utils.get_model_last_data()
# Resume from the last checkpoint if a previous run left one behind.
if CURRENT_EPOCH > 0:
    print("Loading last model state in epoch", CURRENT_EPOCH)
    model.load_weights(utils.get_exp_folder_last_epoch())
    print("Best validation result was PSNR=", BEST_VALIDATION)
```
### Training
```
@tf.function
def train_step(patch_x, patch_y):
    """Run one optimization step on a single mini-batch.

    Records the forward pass on a gradient tape, applies the resulting
    gradients with the optimizer, and feeds the batch loss into the
    running train_loss metric.
    """
    with tf.GradientTape() as tape:
        preds = model(patch_x)
        batch_loss = loss_object(patch_y, preds)
    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(batch_loss)
def valid_step(valid_x, valid_y, num_val_minibatches, mini_batch_size):
    """Evaluate the model over a validation set, one mini-batch at a time.

    Returns:
        (psnr, nrmse, mse): metrics accumulated over all validation batches.
    """
    valid_mse = tf.keras.metrics.MeanSquaredError(name='train_mse')
    valid_custom_metrics = utils.CustomMetric()
    for batch_idx in tqdm(range(num_val_minibatches)):
        start = batch_idx * mini_batch_size
        stop = start + mini_batch_size
        batch_x = tf.convert_to_tensor(valid_x[start:stop], dtype=tf.float32)
        batch_y = tf.convert_to_tensor(valid_y[start:stop], dtype=tf.float32)
        # Same shift/scale normalization (+ CONST_GAMA offset) used in training.
        batch_x = ((batch_x + SHIFT_VALUE_X) / SCALE_VALUE_X) + CONST_GAMA
        batch_y = ((batch_y + SHIFT_VALUE_Y) / SCALE_VALUE_Y) + CONST_GAMA
        preds = model(batch_x)
        valid_mse(batch_y, preds)
        # feed the metric evaluator
        valid_custom_metrics.feed(batch_y.numpy(), preds.numpy())
    # get metric results
    psnr, nrmse = valid_custom_metrics.result()
    valid_mse_result = valid_mse.result().numpy()
    valid_custom_metrics.reset_states()
    valid_mse.reset_states()
    return psnr, nrmse, valid_mse_result
MAX_EPOCHS = 100
EVAL_STEP = 1       # validate (and checkpoint) every EVAL_STEP epochs
CONST_GAMA = 0.001  # small offset added after normalization
for epoch in range(CURRENT_EPOCH, MAX_EPOCHS):
    #TRAINING
    print("TRAINING EPOCH", epoch)
    for k in tqdm(range(0, num_train_minibatches)):
        # Slice one mini-batch, convert to tensors, normalize, then step.
        seismic_x = train_x[k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        seismic_y = train_y[k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        seismic_x = tf.convert_to_tensor(seismic_x, dtype=tf.float32)
        seismic_y = tf.convert_to_tensor(seismic_y, dtype=tf.float32)
        seismic_x = ((seismic_x+SHIFT_VALUE_X)/SCALE_VALUE_X)+CONST_GAMA
        seismic_y = ((seismic_y+SHIFT_VALUE_Y)/SCALE_VALUE_Y)+CONST_GAMA
        train_step(seismic_x, seismic_y)
    #VALIDATION
    if epoch%EVAL_STEP == 0:
        clear_output()
        print("VALIDATION EPOCH", epoch)
        #saving last epoch model
        model.save_weights(utils.get_exp_folder_last_epoch(), save_format='tf')
        #valid with set 1
        print("Validation set")
        psnr_1, nrmse_1, mse_1 = valid_step(valid_x_1, valid_y_1, num_val_minibatches, mini_batch_size)
        #valid with set 2 (disabled; placeholder zeros keep the chart API happy)
        #print("Validation set 2")
        #psnr_2, nrmse_2, mse_2 = valid_step(valid_x_2, valid_y_2, num_val_minibatches, mini_batch_size)
        psnr_2, nrmse_2, mse_2 = 0, 0, 0
        #valid with set 3 (disabled; placeholder zeros keep the chart API happy)
        #print("Validation set 3")
        #psnr_3, nrmse_3, mse_3 = valid_step(valid_x_3, valid_y_3, num_val_minibatches, mini_batch_size)
        psnr_3, nrmse_3, mse_3 = 0, 0, 0
        utils.update_chart_data(epoch=epoch, train_mse=train_loss.result().numpy(),
            valid_mse=[mse_1,mse_2,mse_3], psnr=[psnr_1,psnr_2,psnr_3], nrmse=[nrmse_1,nrmse_2, nrmse_3])
        utils.draw_chart()
        #saving best validation model (best = highest PSNR on validation set 1)
        if psnr_1 > BEST_VALIDATION:
            BEST_VALIDATION = psnr_1
            model.save_weights(utils.get_exp_folder_best_valid(), save_format='tf')
        train_loss.reset_states()
utils.draw_chart()
#load best model
model.load_weights(utils.get_exp_folder_best_valid())
CONST_GAMA = 0.001
# valid_x_1 = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_val.npy", mmap_mode='c')
# valid_y_1 = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_val.npy", mmap_mode='c')
qtd_traning = valid_x_1.shape
print("Loaded",qtd_traning, "samples")
# #normalization
# test_x = utils.shift_and_normalize(test_x, SHIFT_VALUE_X, SCALE_VALUE_X)
# test_y = utils.shift_and_normalize(test_y, SHIFT_VALUE_Y, SCALE_VALUE_Y)
#batches
num_val_minibatches = math.floor(valid_x_1.shape[0]/mini_batch_size)
# test_batches = utils.random_mini_batches(test_x, test_y, None, None, 8, seed=0)
#metrics
val_mse = tf.keras.metrics.MeanSquaredError(name='val_mse')
val_custom_metrics = utils.CustomMetric()
import json

# Per-genre validation: idx_genders_val.json maps genre -> list of batch indices.
# A context manager replaces the bare open()/f.close() pair, so the file is
# closed even if evaluation raises part-way through.
with open('/home/arthursrr/Documentos/Audio_Inpainting/Datasets/idx_genders_val.json', "r") as f:
    idx_gen = json.loads(f.read())

for k in idx_gen:
    for i in tqdm(idx_gen[k]):
        data_x = valid_x_1[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
        data_y = valid_y_1[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
        data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
        data_y = tf.convert_to_tensor(data_y, dtype=tf.float32)
        data_x = ((data_x+SHIFT_VALUE_X)/SCALE_VALUE_X)+CONST_GAMA
        data_y = ((data_y+SHIFT_VALUE_Y)/SCALE_VALUE_Y)+CONST_GAMA
        predictions = model(data_x)
        val_mse(data_y, predictions)
        predictions = predictions.numpy()
        data_y = data_y.numpy()
        # feed the metric evaluator
        val_custom_metrics.feed(data_y, predictions)
    # get metric results for this genre, then reset so the next genre starts fresh
    psnr, nrmse = val_custom_metrics.result()
    val_mse_result = val_mse.result().numpy()
    val_custom_metrics.reset_states()
    val_mse.reset_states()
    print(k ,"\nPSNR:", psnr,"\nNRMSE:", nrmse)
```
## Test
```
# load best model
model.load_weights(utils.get_exp_folder_best_valid())
CONST_GAMA = 0.001
test_x = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_test.npy", mmap_mode='c')
test_y = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_test.npy", mmap_mode='c')
qtd_traning = test_x.shape
print("Loaded",qtd_traning, "samples")
# #normalization
# test_x = utils.shift_and_normalize(test_x, SHIFT_VALUE_X, SCALE_VALUE_X)
# test_y = utils.shift_and_normalize(test_y, SHIFT_VALUE_Y, SCALE_VALUE_Y)
# batches
num_test_minibatches = math.floor(test_x.shape[0]/mini_batch_size)
# test_batches = utils.random_mini_batches(test_x, test_y, None, None, 8, seed=0)
# metrics
# Fix: this metric was mislabeled 'train_mse' (copy-paste from valid_step);
# renamed to 'test_mse' to match the test evaluation it measures, consistent
# with the per-genre test cell below.
test_mse = tf.keras.metrics.MeanSquaredError(name='test_mse')
test_custom_metrics = utils.CustomMetric()
# test
for i in tqdm(range(num_test_minibatches)):
    data_x = test_x[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
    data_y = test_y[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
    data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
    data_y = tf.convert_to_tensor(data_y, dtype=tf.float32)
    data_x = ((data_x+SHIFT_VALUE_X)/SCALE_VALUE_X)+CONST_GAMA
    data_y = ((data_y+SHIFT_VALUE_Y)/SCALE_VALUE_Y)+CONST_GAMA
    predictions = model(data_x)
    test_mse(data_y, predictions)
    predictions = predictions.numpy()
    data_y = data_y.numpy()
    # feed the metric evaluator
    test_custom_metrics.feed(data_y, predictions)
    # just show the first example of each batch until 5
    # print("Spatial domain: X - Y - PREDICT - DIFF")
    # plt.imshow(np.hstack((data_x[0,:,:,0], data_y[0,:,:,0], predictions[0,:,:,0], np.abs(predictions[0,:,:,0]-seismic_y[0,:,:,0]))) , cmap='Spectral', vmin=0, vmax=1)
    # plt.axis('off')
    # plt.pause(0.1)
    # NOTE: predictions here are still in normalized space.
    # predictions = inv_shift_and_normalize(predictions, SHIFT_VALUE_Y, SCALE_VALUE_Y)
    # np.save(outfile_path, predictions)
# get metric results
psnr, nrmse = test_custom_metrics.result()
test_mse_result = test_mse.result().numpy()
test_custom_metrics.reset_states()
test_mse.reset_states()
# Fix: added the missing ':' after NRMSE for consistency with the other result prints.
print("PSNR:", psnr,"\nNRMSE:", nrmse)
#load best model
model.load_weights(utils.get_exp_folder_best_valid())
# Small constant added after normalization to offset values away from zero.
CONST_GAMA = 0.001
# Memory-mapped ('c' = copy-on-write) test spectrogram arrays.
test_x = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_test.npy", mmap_mode='c')
test_y = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_test.npy", mmap_mode='c')
qtd_traning = test_x.shape
print("Loaded",qtd_traning, "samples")
# #normalization
# test_x = utils.shift_and_normalize(test_x, SHIFT_VALUE_X, SCALE_VALUE_X)
# test_y = utils.shift_and_normalize(test_y, SHIFT_VALUE_Y, SCALE_VALUE_Y)
#batches
# Number of whole mini-batches; any trailing partial batch is dropped.
num_test_minibatches = math.floor(test_x.shape[0]/mini_batch_size)
# test_batches = utils.random_mini_batches(test_x, test_y, None, None, 8, seed=0)
#metrics
test_mse = tf.keras.metrics.MeanSquaredError(name='test_mse')
test_custom_metrics = utils.CustomMetric()
import json

# Per-group evaluation: the JSON file maps a group key to the list of
# mini-batch indices belonging to that group (file name says "genders" —
# presumably genre/gender labels of the tracks; verify against the dataset).
f = open('/home/arthursrr/Documentos/Audio_Inpainting/Datasets/idx_genders_test.json', "r")
idx_gen = json.loads(f.read())
for k in idx_gen:
    for i in tqdm(idx_gen[k]):
        data_x = test_x[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
        data_y = test_y[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
        data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
        data_y = tf.convert_to_tensor(data_y, dtype=tf.float32)
        # Same shift/scale normalization used in the other evaluation cells.
        data_x = ((data_x+SHIFT_VALUE_X)/SCALE_VALUE_X)+CONST_GAMA
        data_y = ((data_y+SHIFT_VALUE_Y)/SCALE_VALUE_Y)+CONST_GAMA
        predictions = model(data_x)
        test_mse(data_y, predictions)
        predictions = predictions.numpy()
        data_y = data_y.numpy()
        #feed the metric evaluator
        test_custom_metrics.feed(data_y, predictions)
    #get metric results
    psnr, nrmse = test_custom_metrics.result()
    test_mse_result = test_mse.result().numpy()
    # Reset so the next group's metrics start from zero.
    test_custom_metrics.reset_states()
    test_mse.reset_states()
    print(k ,"\nPSNR:", psnr,"\nNRMSE:", nrmse)
# Closing file
f.close()
def griffin_lim(S, frame_length=256, fft_length=255, stride=64):
    '''
    Recover a time-domain signal from a magnitude spectrogram by iteratively
    re-estimating the phase (Griffin-Lim, 1000 iterations).
    TensorFlow implementation, based on
    https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
    '''
    # Add a leading batch dimension and move to the complex domain.
    magnitude = tf.cast(tf.expand_dims(S, 0), dtype=tf.complex64)
    signal = tf.signal.inverse_stft(magnitude, frame_length, stride, fft_length=fft_length)
    for _ in range(1000):
        # Re-analyze the current estimate and keep only its phase
        # (unit-magnitude complex numbers; the max() guards against /0).
        spectrum = tf.signal.stft(signal, frame_length, stride, fft_length=fft_length)
        phase = spectrum / tf.cast(tf.maximum(1e-16, tf.abs(spectrum)), tf.complex64)
        # Resynthesize with the original magnitude and the refined phase.
        signal = tf.signal.inverse_stft(magnitude * phase, frame_length, stride, fft_length=fft_length)
    return tf.squeeze(signal, 0)
# Reload the best validation checkpoint and run one fixed batch for listening.
model.load_weights(utils.get_exp_folder_best_valid())
test_x = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/X_test.npy", mmap_mode='c')
test_y = np.load("/mnt/backup/arthur/Free_Music_Archive/Spectrogramas/y_test.npy", mmap_mode='c')
qtd_traning = test_x.shape
print("Loaded",qtd_traning, "samples")
#batches
num_test_minibatches = math.floor(test_x.shape[0]/mini_batch_size)
#metrics
test_mse = tf.keras.metrics.MeanSquaredError(name='test_mse')
test_custom_metrics = utils.CustomMetric()
# Fixed mini-batch index chosen for the qualitative demo.
i = 5000
CONST_GAMA = 0.001
data_x = test_x[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
data_y = test_y[i * mini_batch_size : i * mini_batch_size + mini_batch_size]
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
# Normalize the input only; the prediction is mapped back to raw scale below.
data_norm = ((data_x+SHIFT_VALUE_X)/SCALE_VALUE_X)+CONST_GAMA
predictions = model(data_norm)
predictions = utils.inv_shift_and_normalize(predictions, SHIFT_VALUE_Y, SCALE_VALUE_Y)
# Bare expression: displays the tensor in the notebook.
predictions
def _stack_first_channel(batch, count=58):
    """Concatenate channel 0 of the first `count` items of `batch` along axis 0.

    Replaces three copy-pasted loops that grew the result by repeated pairwise
    np.concatenate calls (quadratic copying) with one concatenate.
    """
    return np.concatenate([batch[i, :, :, 0] for i in range(count)], axis=0)

# Reassemble full spectrograms from the batch of windows.
audio_pred = _stack_first_channel(predictions)
audio_pred.shape
audio_corte = _stack_first_channel(data_x)
audio_corte.shape
audio_original = _stack_first_channel(data_y)
audio_original.shape
# Invert the spectrograms to waveforms with Griffin-Lim and play them.
# NOTE(review): `ipd` is presumably IPython.display imported in an earlier
# cell — confirm.
wave_original = griffin_lim(audio_original, frame_length=256, fft_length=255, stride=64)
ipd.Audio(wave_original, rate=16000)
wave_corte = griffin_lim(audio_corte, frame_length=256, fft_length=255, stride=64)
ipd.Audio(wave_corte, rate=16000)
wave_pred = griffin_lim(audio_pred, frame_length=256, fft_length=255, stride=64)
ipd.Audio(wave_pred, rate=16000)
import soundfile as sf
# Export the corrupted input and the model's reconstruction as 16-bit WAVs.
sf.write('x.wav', wave_corte, 16000, subtype='PCM_16')
sf.write('pred.wav', wave_pred, 16000, subtype='PCM_16')
```
| github_jupyter |
#JRW, HW4 Solution, DATSCI W261, October 2015
```
!mkdir Data
!curl -L https://www.dropbox.com/s/vbalm3yva2rr86m/Consumer_Complaints.csv?dl=0 -o Data/Consumer_Complaints.csv
!curl -L https://www.dropbox.com/s/zlfyiwa70poqg74/ProductPurchaseData.txt?dl=0 -o Data/ProductPurchaseData.txt
!curl -L https://www.dropbox.com/s/6129k2urvbvobkr/topUsers_Apr-Jul_2014_1000-words.txt?dl=0 -o Data/topUsers_Apr-Jul_2014_1000-words.txt
!curl -L https://www.dropbox.com/s/w4oklbsoqefou3b/topUsers_Apr-Jul_2014_1000-words_summaries.txt?dl=0 -o Data/topUsers_Apr-Jul_2014_1000-words_summaries.txt
!curl -L https://kdd.ics.uci.edu/databases/msweb/anonymous-msweb.data.gz -o Data/anonymous-msweb.data.gz
!gunzip Data/anonymous-msweb.data.gz
ls Data
```
##HW 4.0: MRJob short answer responses
####What is mrjob? How is it different from Hadoop MapReduce?
>Mrjob is a Python package for running Hadoop streaming jobs.
Mrjob is a python-based framework that assists you in submitting your job
to the Hadoop job tracker and in running each individual step under Hadoop Streaming.
Hadoop is a general software implementation for MapReduce programming
and the MapReduce execution framework.
####What are the mapper_final(), combiner_final(), reducer_final() methods? When are they called?
>These methods run a user-defined action. They are called
as part of the life cycle (init, main, and final) of
the mapper, combiner, and reducer methods once
they have processed all input and have completed execution.
##HW4.1: Serialization short answer responses
####What is serialization in the context of mrjob or Hadoop?
>Serialization is the process of turning structured objects into a byte stream.
In the context of Hadoop, serialization is leveraged for compression
to reduce network and disk loads. Contrast this to mrjob, where serialization
is leveraged to conveniently pass structured objects
between mapper, reducer, etc. methods.
####When is it used in these frameworks?
>These frameworks accept and use a variety of serializations
for input, output, and internal transmissions of data.
####What is the default serialization mode for input and output for mrjob?
>For input, the default serialization mode is raw text (RawValueProtocol),
and for output (and internal), the default mode is JSON format (JSONProtocol).
##HW 4.2: Preprocessing logfiles on a single node
```
!head -50 Data/anonymous-msweb.data
import re

# Preprocess the msweb log: tag every visit ("V") row with the customer ("C")
# row that precedes it, writing "V,<pageID>,<count>,C,<custID>" lines.
#
# Fixes: the original wrote `open(...).close` without calling close(), so the
# truncating handle was only closed by garbage collection; it also re-opened
# the output file once per visit row.  The output file is now opened once.
open("anonymous-msweb-preprocessed.data", "w").close()
custID = "NA"
with open("Data/anonymous-msweb.data", "r") as IF, \
     open("anonymous-msweb-preprocessed.data", "a") as OF:
    for line in IF:
        line = line.strip()
        data = re.split(",", line)
        if data[0] == "C":
            # "C" records introduce a customer; remember the unquoted id.
            custID = re.sub("\"", "", data[1])
        if data[0] == "V" and not custID == "NA":
            # "V" records are page visits; append the current customer id.
            OF.writelines(line + "," + "C" + "," + custID + "\n")
!head -10 anonymous-msweb-preprocessed.data
!wc -l anonymous-msweb-preprocessed.data
```
##HW 4.4: Find the most frequent visitor of each page using mrjob and the output of 4.2. In this output please include the webpage URL, webpage ID and visitor ID.
```
%%writefile mostFrequentVisitors.py
#!/usr/bin/python
from mrjob.job import MRJob
from mrjob.step import MRStep
from mrjob.protocol import RawValueProtocol
import re
import operator
class mostFrequentVisitors(MRJob):
    """For each web page, report the visitor with the most visits.

    Input: the preprocessed file from HW 4.2, one "V,<pageID>,<count>,C,<custID>"
    line per visit.  Output: raw "URL,pageID,custID,visitCount" text lines.
    """

    # Emit plain text lines rather than JSON.
    OUTPUT_PROTOCOL = RawValueProtocol
    # pageID -> URL lookup, filled once per reducer in reducer_init.
    URLs = {}

    def steps(self):
        # Single step; the reducer needs an init phase to load the URL table.
        return [MRStep(
            mapper = self.mapper,
            combiner = self.combiner,
            reducer_init = self.reducer_init,
            reducer = self.reducer
        )]

    def mapper(self, _, line):
        # Fields: V, pageID, count, "C", custID (see the 4.2 preprocessing).
        data = re.split(",",line)
        pageID = data[1]
        custID = data[4]
        # One visit by custID to pageID.
        yield pageID,{custID:1}

    def combiner(self,pageID,visits):
        # Merge the per-customer visit counts emitted for this page.
        allVisits = {}
        for visit in visits:
            for custID in visit.keys():
                allVisits.setdefault(custID,0)
                allVisits[custID] += visit[custID]
        yield pageID,allVisits

    def reducer_init(self):
        # Build pageID -> URL from the raw msweb file (field 4 of the page
        # attribute rows); shorter rows are skipped via the IndexError guard.
        with open("anonymous-msweb.data", "r") as IF:
            for line in IF:
                try:
                    line = line.strip()
                    data = re.split(",",line)
                    URL = data[4]
                    pageID = data[1]
                    self.URLs[pageID] = URL
                except IndexError:
                    pass

    def reducer(self,pageID,visits):
        # Finish the count aggregation, then pick the customer with the most
        # visits to this page.
        allVisits = {}
        for visit in visits:
            for custID in visit.keys():
                allVisits.setdefault(custID,0)
                allVisits[custID] += visit[custID]
        custID = max(allVisits.items(), key=operator.itemgetter(1))[0]
        yield None,self.URLs[pageID]+","+pageID+","+custID+","+str(allVisits[custID])

if __name__ == '__main__':
    mostFrequentVisitors.run()
!chmod +x mostFrequentVisitors.py
!python mostFrequentVisitors.py anonymous-msweb-preprocessed.data --file Data/anonymous-msweb.data > mostFrequentVisitors.txt
```
####Check on output
Note that in the output below, the number of visits (col. 4)
for each frequent visitor of each web page is 1.
So, no page had > 1 visits by any individual,
and technically every visitor was a most frequent visitor.
```
!head -25 mostFrequentVisitors.txt
```
##HW 4.5: K-means clustering of Twitter users with 1,000 words as features
###MRJob class for 1k dimensional k-means clustering used in parts (A-D)
```
%%writefile kMeans.py
#!/usr/bin/env python
from mrjob.job import MRJob
from mrjob.step import MRStep
import re
class kMeans(MRJob):
    """One map-reduce pass of k-means over 1000-dimensional word-frequency
    vectors; the driver script re-runs this job until convergence."""

    def steps(self):
        return [MRStep(
            mapper_init = self.mapper_init,
            mapper = self.mapper,
            combiner = self.combiner,
            reducer = self.reducer
        )]

    ## mapper_init is responsible for reading in the centroids.
    def mapper_init(self):
        # NOTE(review): relies on Python 2 map() returning a list; under
        # Python 3 these would be one-shot map objects — confirm runtime.
        self.centroid_points = [map(float,s.split('\n')[0].split(',')) for s in open("centroids.txt").readlines()]

    ## mapper is responsible for finding the centroid
    ## that is closest to the user (line), and then
    ## passing along the closest centroid's idx with the user vector as:
    ## (k,v) = (idx,[users,1,vector])
    ## where 'users' initially is a singleton vector, [ID]
    def mapper(self, _, datstr):
        total = 0
        data = re.split(',',datstr)
        ID = data[0]
        code = int(data[1])
        users = [ID]
        # One-hot of the user-type label (0..3).
        codes = [0,0,0,0]
        codes[code] = 1
        # Word frequencies: counts (fields 3..1002) over the total (field 2).
        coords = [float(data[i+3])/float(data[2]) for i in range(1000)]
        for coord in coords:
            total += coord
        # Nearest centroid by Euclidean distance; minDist == 0 doubles as the
        # "not set yet" sentinel for the first centroid.
        minDist = 0
        IDX = -1
        for idx in range(len(self.centroid_points)):
            centroid = self.centroid_points[idx]
            dist = 0
            for ix in range(len(coords)):
                dist += (centroid[ix]-coords[ix])**2
            dist = dist ** 0.5
            if minDist:
                if dist < minDist:
                    minDist = dist
                    IDX = idx
            else:
                minDist = dist
                IDX = idx
        yield (IDX,[users,1,coords,codes])

    ## combiner takes the mapper output and aggregates (sum) by idx-key
    def combiner(self,IDX,data):
        N = 0
        sumCoords = [0*num for num in range(1000)]
        sumCodes = [0,0,0,0]
        users = []
        for line in data:
            users.extend(line[0])
            N += line[1]
            coords = line[2]
            codes = line[3]
            sumCoords = [sumCoords[i]+coords[i] for i in range(len(sumCoords))]
            sumCodes = [sumCodes[i]+codes[i] for i in range(len(sumCodes))]
        yield (IDX,[users,N,sumCoords,sumCodes])

    ## reducer finishes aggregating all mapper outputs
    ## and then takes the means by idx-key.
    def reducer(self,IDX,data):
        N = 0
        sumCoords = [0*num for num in range(1000)]
        sumCodes = [0,0,0,0]
        users = []
        for line in data:
            users.extend(line[0])
            N += line[1]
            coords = line[2]
            codes = line[3]
            sumCoords = [sumCoords[i]+coords[i] for i in range(len(sumCoords))]
            sumCodes = [sumCodes[i]+codes[i] for i in range(len(sumCodes))]
        # New centroid = mean of member vectors; sumCodes is the class mix
        # used by the driver for purity computation.
        centroid = [sumCoords[i]/N for i in range(len(sumCoords))]
        yield (IDX,[users,N,centroid,sumCodes])

if __name__ == '__main__':
    kMeans.run()
%%writefile kMeans_driver.py
#!/usr/bin/env python
from numpy import random
from kMeans import kMeans
import re,sys
# Job instance: cluster the 1000-word user file, shipping centroids.txt to
# every task via --file.
mr_job = kMeans(args=["topUsers_Apr-Jul_2014_1000-words.txt","--file","centroids.txt"])
# Convergence threshold on per-centroid Euclidean movement.
thresh = 0.0001
# Which initialization (A/B/C/D) comes from the single command-line argument.
scriptName,part = sys.argv
## only stop when distance is below thresh for all centroids
def stopSignal(k, thresh, newCentroids, oldCentroids):
    """Return 1 when every centroid moved at most `thresh`, else 0."""
    for idx in range(k):
        # Euclidean distance between old and new positions of centroid idx.
        moved = sum(
            (new - old) ** 2
            for new, old in zip(newCentroids[idx], oldCentroids[idx])
        ) ** 0.5
        if moved > thresh:
            return 0
    return 1
##################################################################################
# Use four centroids from the coding
##################################################################################
def startCentroidsA():
    # Initialization A: k=4 random points normalized onto the probability
    # simplex (coordinates sum to 1, like the word-frequency vectors).
    # NOTE(review): `random` is numpy.random here, so random.sample(1000)
    # draws 1000 uniform floats — confirm against the installed numpy.
    k = 4
    centroids = []
    for i in range(k):
        rndpoints = random.sample(1000)
        total = sum(rndpoints)
        centroid = [pt/total for pt in rndpoints]
        centroids.append(centroid)
    return centroids
###################################################################################
###################################################################################
## Generate random initial centroids around the global aggregate
###################################################################################
def startCentroidsBC(k):
    # Initializations B and C: random perturbations of the global aggregate
    # word distribution, re-normalized to sum to 1.
    counter = 0
    for line in open("topUsers_Apr-Jul_2014_1000-words_summaries.txt").readlines():
        # Line index 2 of the summaries file carries the global aggregate.
        if counter == 2:
            data = re.split(",",line)
            globalAggregate = [float(data[i+3])/float(data[2]) for i in range(1000)]
        counter += 1
    ## perturb the global aggregate for the k initializations
    centroids = []
    for i in range(k):
        rndpoints = random.sample(1000)
        # Small (1/10-scale) random offset around the global aggregate.
        peturpoints = [rndpoints[n]/10+globalAggregate[n] for n in range(1000)]
        centroids.append(peturpoints)
        total = 0
        for j in range(len(centroids[i])):
            total += centroids[i][j]
        # Re-normalize the perturbed centroid so it sums to 1.
        for j in range(len(centroids[i])):
            centroids[i][j] = centroids[i][j]/total
    return centroids
###################################################################################
##################################################################################
# Use four centroids from the coding
##################################################################################
def startCentroidsD():
    # Initialization D: use the per-class aggregate rows of the summaries
    # file (every row after the first two) as "trained" starting centroids.
    k = 4
    centroids = []
    counter = 0
    for line in open("topUsers_Apr-Jul_2014_1000-words_summaries.txt").readlines():
        # Skip row 0 and row 1; keep the remaining aggregate rows.
        if counter and counter > 1:
            data = re.split(",",line)
            coords = [float(data[i+3])/float(data[2]) for i in range(1000)]
            centroids.append(coords)
        counter += 1
    return centroids
###################################################################################
# Pick the initialization requested on the command line.
if part == "A":
    k = 4
    centroids = startCentroidsA()
if part == "B":
    k = 2
    centroids = startCentroidsBC(k)
if part == "C":
    k = 4
    centroids = startCentroidsBC(k)
if part == "D":
    k = 4
    centroids = startCentroidsD()
## the totals for each user type
numType = [752,91,54,103]
numType = [float(numType[i]) for i in range(4)]
# Write the starting centroids where the MRJob mappers expect them.
with open("centroids.txt", 'w+') as f:
    for centroid in centroids:
        centroid = [str(coord) for coord in centroid]
        f.writelines(",".join(centroid) + "\n")
iterate = 0
stop = 0
clusters = ["NA" for i in range(k)]
N = ["NA" for i in range(k)]
# Lloyd iterations: run the MR job, harvest the new centroids, and repeat
# until no centroid moves more than thresh.
while(not stop):
    with mr_job.make_runner() as runner:
        runner.run()
        # Shallow copy is enough: each centroids[key] is replaced wholesale.
        oldCentroids = centroids[:]
        clusterPurities = []
        for line in runner.stream_output():
            key,value = mr_job.parse_output_line(line)
            clusters[key] = value[0]
            N[key] = value[1]
            centroids[key] = value[2]
            sumCodes = value[3]
            # Purity = share of the dominant user-type code in the cluster.
            clusterPurities.append(float(max(sumCodes))/float(sum(sumCodes)))
        ## update the centroids
        with open("centroids.txt", 'w+') as f:
            for centroid in centroids:
                centroid = [str(coord) for coord in centroid]
                f.writelines(",".join(centroid) + "\n")
        # Python 2 print statement: "<iteration>,<purity1>,...,<purityK>".
        print str(iterate+1)+","+",".join(str(purity) for purity in clusterPurities)
        stop = stopSignal(k,thresh,centroids,oldCentroids)
        # Force at least two iterations before convergence may trigger.
        if not iterate:
            stop = 0
        iterate += 1
!chmod +x kMeans.py kMeans_driver.py
```
####Run k-means for parts A-D
```
!./kMeans_driver.py A > purities-A.txt
!./kMeans_driver.py B > purities-B.txt
!./kMeans_driver.py C > purities-C.txt
!./kMeans_driver.py D > purities-D.txt
####Plot cluster purity output
from matplotlib import pyplot as plot
import numpy as np
import re
%matplotlib inline
k = 4
plt.figure(figsize=(15, 15))
## function loads data from any of the 4 initializations
def loadData(filename):
    """Parse a purity log file.

    Each line reads "<iteration>,<purity1>,...,<purityK>".  Returns a dict
    mapping cluster number (1-based) to that cluster's purities, one per
    iteration, in file order.

    Fixes: the original appended to a module-level `iterations` list that is
    undefined on the first call (NameError); callers rebuild the iteration
    axis from the purity lists anyway, so the append is dropped.  The file is
    now also closed deterministically via a context manager.
    """
    purities = {}
    with open(filename, 'r') as f:
        for line in f:
            data = re.split(",", line.strip())
            # Field 0 is the iteration number; fields 1..K are purities.
            for i in range(1, len(data)):
                purities.setdefault(i, [])
                purities[i].append(float(data[i]))
    return purities
## load purities for initialization A
purities = {}
purities = loadData("purities-A.txt")
# Iteration axis rebuilt from the number of recorded purities.
iterations = [i+1 for i in range(len(purities[1]))]
## plot purities for initialization A (one line per cluster, k=4)
plot.subplot(2,2,1)
plot.axis([0.25, max(iterations)+0.25,0.45, 1.01])
plot.plot(iterations,purities[1],'b',lw=2)
plot.plot(iterations,purities[2],'r',lw=2)
plot.plot(iterations,purities[3],'g',lw=2)
plot.plot(iterations,purities[4],'black',lw=2)
plot.ylabel('Purity',fontsize=15)
plot.title("A",fontsize=20)
plot.grid(True)
## load purities for initialization B (original comment mistakenly said "A")
purities = {}
purities = loadData("purities-B.txt")
iterations = [i+1 for i in range(len(purities[1]))]
## plot purities for initialization B (k=2, so only two lines)
plot.subplot(2,2,2)
plot.axis([0.25, max(iterations)+0.25,0.45, 1.01])
plot.plot(iterations,purities[1],'b',lw=2)
plot.plot(iterations,purities[2],'r',lw=2)
plot.title("B",fontsize=20)
plot.grid(True)
## load purities for initialization C
purities = {}
purities = loadData("purities-C.txt")
iterations = [i+1 for i in range(len(purities[1]))]
## plot purities for initialization C
plot.subplot(2,2,3)
plot.axis([0.25, max(iterations)+0.25,0.45, 1.01])
plot.plot(iterations,purities[1],'b',lw=2)
plot.plot(iterations,purities[2],'r',lw=2)
plot.plot(iterations,purities[3],'g',lw=2)
plot.plot(iterations,purities[4],'black',lw=2)
plot.xlabel('Iteration',fontsize=15)
plot.ylabel('Purity',fontsize=15)
plot.title("C",fontsize=20)
plot.grid(True)
## load purities for initialization D
purities = {}
purities = loadData("purities-D.txt")
iterations = [i+1 for i in range(len(purities[1]))]
## plot purities for initialization D
plot.subplot(2,2,4)
plot.axis([0.25, max(iterations)+0.25,0.45, 1.01])
plot.plot(iterations,purities[1],'b',lw=2)
plot.plot(iterations,purities[2],'r',lw=2)
plot.plot(iterations,purities[3],'g',lw=2)
plot.plot(iterations,purities[4],'black',lw=2)
plot.xlabel('Iteration',fontsize=15)
plot.title("D",fontsize=20)
plot.grid(True)
```
###Discussion
As a general note, our comparison of initializations must be 'taken with a grain of salt,'
as A,B, and C all incorporate some randomization into initialization,
leading to results that will vary from run to run.
If we wished to compare the randomization initializations with greater confidence,
it would be best to run our experiment a number of times,
recording purities at convergence, the numbers of iterations before convergence, etc.,
and summarizing these results across runs.
The above being said, in the printout above we can see across those runs with k=4 (A,B, and D),
that the 'trained' centroid initializations (D) succeeded in converging in the fewest iterations.
In addition, we can see that when D is compared to all other initializations,
the top three (most pure) clusters are generally purer (>90%) after convergence,
indicating that the labeling of users accompanying the data are likely meaningful.
Of all of the initializations, we can see that B performs the worst with regard to
cluster purity. However, this is not exactly a fair comparison, as the purities of 2 clusters
with 4 labels will necessarily be low---it is not possible to isolate all user types!
However, if we wished to see the value of this initialization
in the context of the dataset, we could take note of whether
the two most human classes (0 and 3) are clustered together,
and separate from the two most automated classes (1 and 2),
which is likely the case, but would require keeping track of the
number of each user type present in each cluster (at convergence).
Looking closer at D, we can see that one of the clusters is very non-pure,
indicating that some users were discordant with respect to their classes,
forming a mixed cluster in terms of word-frequency.
However, we must be careful not to mislead ourselves while interpreting these results,
as we do not know which user class dominates each cluster. Noting that approximately
75% of all users are labeled as human, it is possible for all clusters to be dominated
by the human class. This however, is not the case, and could be made clear through further analysis,
plotting the numbers of each type present in each cluster at convergence
(which would also inform us of the fact that the least pure cluster is a split of
robots and cyborgs, who can actually be quite similar!).
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, FunctionTransformer
from sklearn.impute import SimpleImputer
# Download the 20 newsgroups corpus; train and test splits ship separately.
data_train = fetch_20newsgroups(subset='train')
data_test = fetch_20newsgroups(subset='test')
# Inspect the attributes of the two returned Bunch objects.
dir(data_train), dir(data_test)
```
# Создание пайплайна с использованием стандартных функций
```
# Text preprocessing: hash raw documents into sparse feature vectors.
# The commented-out steps are placeholders for numeric-feature pipelines.
preprocessor = Pipeline(steps=[('embeddings', HashingVectorizer()),
                               # ('imputer', SimpleImputer()),
                               # ('poly', PolynomialFeatures()),
                               # ('log', FunctionTransformer(np.log1p)),
                               # ('scaler', StandardScaler())
                               ])
pipeline = make_pipeline(preprocessor, LogisticRegression(max_iter=10000))
pipeline.fit(data_train.data, data_train.target)
# Accuracy on the held-out official test split.
pipeline.score(data_test.data, data_test.target)
```
# Создание своего класса, преобразующего данные
```
class TextTransformer(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer that hashes raw documents into sparse
    feature vectors using a fresh HashingVectorizer on each transform call."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Feature hashing is stateless: nothing to learn.
        return self

    def transform(self, X, y=None):
        return HashingVectorizer().fit_transform(X)
# Hold out 30% of the official train split for local validation, stratified
# so class proportions are preserved.
X_train, X_test, y_train, y_test = train_test_split(data_train.data, data_train.target,
                                                    test_size=0.3,
                                                    shuffle=True,
                                                    stratify=data_train.target,
                                                    random_state=42)
for data in [X_train, y_train, X_test, y_test]:
    print(len(data))
scores = []
clf = LogisticRegression()
pipeline = make_pipeline(TextTransformer(), clf)
pipeline.fit(X_train, y_train)
# NOTE(review): scored on the official test set rather than the local
# X_test/y_test hold-out — confirm this is intentional.
scores.append(pipeline.score(data_test.data, data_test.target))
scores
# Fix: `n_compone` was an undefined name (NameError).  StratifiedKFold's
# parameter is n_splits; use the conventional 5-fold setup.
cv = StratifiedKFold(n_splits=5)
```
#### Пример разных пайплайнов для разных типов полей
```
from sklearn import set_config
# Render sklearn pipelines as interactive HTML diagrams in the notebook.
set_config(display='diagram')
```
| github_jupyter |
# COVID-19 Scientific Analysis
### What is Covid-19 ?
Coronavirus disease 2019 (COVID-19) is an infectious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2).
The disease was first identified in December 2019 in Wuhan, the capital of China's Hubei province, and has since spread globally, resulting in the ongoing 2019–20 coronavirus pandemic.
The first confirmed case of what was then an unknown coronavirus was traced back to November 2019 in Hubei province.Common symptoms include fever, cough, and shortness of breath.
Other symptoms may include fatigue, muscle pain, diarrhoea, sore throat, loss of smell, and abdominal pain.The time from exposure to onset of symptoms is typically around five days but may range from two to fourteen days.
While the majority of cases result in mild symptoms, some progress to viral pneumonia and multi-organ failure.
As of 23 April 2020, more than 2.62 million cases have been reported across 185 countries and territories,resulting in more than 183,000 deaths.More than 784,000 people have recovered.
<p><a href="https://commons.wikimedia.org/wiki/File:Symptoms_of_coronavirus_disease_2019_3.0.svg#/media/File:Symptoms_of_coronavirus_disease_2019_3.0.svg"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b4/Symptoms_of_coronavirus_disease_2019_3.0.svg/1200px-Symptoms_of_coronavirus_disease_2019_3.0.svg.png" alt="Symptoms of coronavirus disease 2019 3.0.svg"></a>
### Covid-19 Symptoms
COVID-19 affects different people in different ways. Most infected people will develop mild to moderate symptoms.
Common symptoms:
* **Fever**
* **Tiredness**
* **Dry Cough**
* **Aches and Pains**
* **Nasal Congestion**
* **Runny Nose**
* **Sore Throat**
* **Diarrhoea**
On average it takes 5–6 days from when someone is infected with the virus for symptoms to show; however, it can take up to 14 days.
People with mild symptoms who are otherwise healthy should self-isolate. Seek medical attention if you have a fever, a cough, and difficulty breathing.
### Covid-19 Preventions
<center><img class="lqwrY" src="data:image/gif;base64,R0lGODlhkACQAPIFAMzMzP//////zJmZmelCNQAAAAAAAAAAACH5BAU2AAUALAAAAACQAJAAAAP/WLrc/hACQKq9OOvN94xgKI5kGVBdqq4qEJRwLJMoa983MO+8PODAYGrQKxodgZ9wybQMXsfoLtCsVqHS7IjaGUy+4LB4TC6HlRysdo3kDASBuHxOr9vv+LsAnVGz2VwaLnmEhYZ5NX1/f4EZg4eQkYWJGH6LRo0Yj5KcnXOUF5aXUxybcWaoqaphdaAWoqMwmRems1ZMrWmxpB51t78VuRuwuxG2FbXAysIaxMUNxwTJyst0rsHPIdHT1NWfutkP277d3cyK4QzjdOXt55XpCutz7fXvoenzcvX896/Z+uLwG+gP266A0QYCK0jAWRSECiMydFgEYkSJ1sCtsXgR/+O3YRtLkVMIAM4JhROzcBzIzV5GkEdWEmS48GUzTCLZXawlEKVNdLwEjYzIM4lHOdcaBnU0lOidpBag3kgZQ2aTR1BN0tQqwApVElaZmEqq9WelpkK+astJr6bZV0970bShFkJYsXau4dFrp6vXt/+Msd2nrOiwuM3u/Kq74O5VxYnzangC+RbjAnxoob1VNHMwxJrweEO6gUiDay0Lgw61Oqro0ac8MEC9+VdRV3uFVnZLWtACuW3b3Y7sr+wcv9QKKnCVutvwPq0/Sy7nT4fn5s71EKeDnPV06m+fbE8IjLKd8XK6w/0OPjgtpu4fx85gfjMe9QTqA4YNCpROK/+m4NfQbuvVgV9RJ81EWAofWWFcgt6hpd9/rkWX3YIdNHjFPXhkNmF80nRIEojESTXVeWexF+JrFzyIYT2brKBhEwQOqCKCs+Th1IuHzfcXWrlpxmKBtVETowoz4qJikBXWqOOIPJa42JLaCUngh1Fe2BODvf1o1pNNohhmkaplmSKEG4pZgYtbIgOmm1WyRCJ0PqbZFJuZ4BiahVqSJx2aNBKI4J4E4mkiZ3OeeSgLTK5oBx96RjiXbYlGuOgKbCaCoKZv2qiicJUWeKkKbHa3qZVqejopomZaSulup45JIZZ1whjqn6OmMKisXRKAZyM4elbOkVzWitcdaCDY3a//qJJZZptp9Crfd5Gu+Wa1crYq6qvUvklIDYTsdKunuXZQLY4o4BibuuJqiyu3kqkbgADyQtIutD0Ceqwn/OYh4LDjJrlEvf1ysmgYQhCbobT7Fuwww5q4WK7C0Rq7BK0PdwIVujhQnG+5bmQsMlmE/DuYjBAvIXLGwvpqSMcBp5zWyg4LW6/JwPlJLm80e9LfIbl6LOWUPfNLCcETx2yxykX3ewHGAregtL5KNu0znJMkPDXIFVvtNddY6xz1zF5bDXbYKC9Ndtk9n402kjIHwXbRLcPs7s5Ez80yq/gObZneGbuNatpUMw14wYIPDrfach/eb+KKF1v42o5LgrPW/3eP3XjlnPRJOOR/cg6054tPvrnohoBOqNhxA4G6vQBn3joOr6cOquyMA/Fr7ToPvDUwBHOuOnx902k6EME7fnkVQhuvevKH1833LJ5RyDzvfSmIr3+au449HdIDaL00xOd+4vdxLD+tsbasiin65jfcdwG47Wc3+raOL00Bx7gvOe/Da1b/lpOv+P2vdrebkw7k0TUDDuZ1AQxb8ZSygKT4r4Coi12iFjibBh5PVwDs0wQ5eBoPno13ItwGCCxoP6ll0EhFIqEETOg74cFQf8igAQ2FsLuiBRA7OSwBC7tnrsP9MIYyGOLsHmi2G25wB0p0oAZ6+LgUlqIHUfygueqoaLnwYe6JRcii4KDXqGeZSYY8ECNnSmK5CArwilJQo5HMcK8zrkGOHYESGO+4wzzmb49swKMfnWjHUQhykNMbYTEOiUj5qfAZjGwk8pAYjkhKkgVA3F88LHnJIuJQk/GgXx876UJAhpKTpJQVHEPZQQy6EV6KZKUDUH
nJTKJRlgR0ZSpXF0tcPoCWfrSlL1c4ylpScpgRyCL8PnlLZJYQg8vspTOJCU34rXKaISBZNE3WTGz+0g1s5B296tZNb35zl0EopznPiU4bqHOd7GynruA5g97tkiL0TKY8fZPPHpzAi4P0Aj6zkAAAIfkEBQYABAAsJwAaAE0AWwAAA5ZIutz+LUgBq7046827/2AojmRpnmg4AENaUe4izXED13iuk/fu/8CgcEgsGo/DADLVWzorgKenKQ0pVchrdbulcr/gsHhMLpvP6LR6ffay3/C4fE6v2zXuS+tOiPL/Tn6AHFqDbHmGiYqLjI2Oj5CRkpMMiJSXmJlqgpqdnp8Me6CjpKWXoieWpj+qqzsDhXSxrmuoNQkAIfkEBQYABQAsJgAYAE4AXwAAA/9Yutz+DggBoL04azuC/1sojpa3eBWprhgQOC8rz0raBDat6/k+SxSMADK8uAKCga/0MVmKDuijiVpGnAVkibhlYK0KQeyknELGjZ4CBz7p0Bl2O2sWwRfqrNR6V/Uvf2+CDgRPc3QPhSF9ig95O39lG32SEYeBdiOPgzN9m5s0eyueZ6AZAJuiNZlRZ5UiR1Uhql2TK0dklhuvXoS1I2INoqa2DY0wo4lpIcQMvG5+ygybxwXNGpiAjssQz8y/rNx4c6nJuuNttNbmvuiH29HSqxDXCpXV4iORzoYh90YraOXooKOStyz1hNG7UXBSwmlcetHIsSnAwRYWjgXCZ4H/IjhYfzRy+LZAXTYj+6BlULcOGwsXC1UKAaiAo0xNf3Kc1GDD5pqXG29CFDq0wMWd9IL+jLg05kqgI5vKQPpwWqAiMEugOgmCpoqsZ4Jh+PJjFyobBN+RkMMh1ocKadXaAeWWihKkcnM96EB2zRC8eRFNwfQCcN4/YrcYluvpZIXFavvsLAT5XeUBlAMvIgGAgKLK72wQ69wI9CVUPD1L1LykMzLWPgRwNA2bgewztXX4pJ0bFO/c2oDHCy58bpzixo8jL/Z0eZxrLJ0jnCWdOM/qpUBif83s9xy2G8Bu14Jz+4mqgrGTX2s+y0Xr0tdz9u7DIlT1ndSjX+2ct/jiT+CxQF9+PAw4yn51ANiagfogeAaDDrXRF2MOjgWhVpFdCEOF4Wm4RoDv8CUhiHlJMKEfJMJmYleLeBAEdgNsRcWMTVDAoWYxnoXKAO/NkQAAIfkEBQYABQAsJgAeAE4AWAAAA+JYutwLLsrpALk46807EVQoSsNonmiqrqwDtnAszzT81nh+53zv/8CgcEgsGo/IpHLJbDqf0Kh0Sq1ar9jhLosjcL/gsHhMLlMAaGPABwi43ZCwQNN+v+NkgX1fru/fZX92W19+ghYYYIJvcx1Yi3COj5AeWAOQiBxYhnuZG5uQA5VXkAGjVpx7p1Spdp6Jk5ifXKWiGWYMGrgLjRe7D7C/BcG/mcIKxL++xwWIzMNez9HMFs/D1oTW2tvCph4dJdxVa9Z4x+TP5uLr7GDZu+Ht8vP0S+9m9/X6E+r7/tvxgCQAACH5BAUGAAUALCYAGABHAF8AAAP/SLrc/jDKSau9OOvNu/9gKI5kaZ5oqq6sAwSwMLSjAN/BTH8vfu8SAGDiw+mAjB5sCCnemBCh8KR8PgbOJQSLE5iyx0XV5304y6MsVJwNoJPgEdfpar8X7TWvXec77CNjOH1ZDnN0IoJWcH6MhYl5DYo/koCQapWNbJiXTnqTMA42nBVVegSgd6ABf5EWRXqHRZmPDG0Bp1GIC7I+DaO1eK4UpAS9gwzAuwrHi8SeDbe/0o5OYRJZd7dht6zVs6/BBLdr3bTLROLkttTCmunQ7vEK5uzv2MXr8uj1FcPjlui1W+XmQjuAmsw1I2Mw4cFuBJ09C9YPYbeLuZoMq3ix9COuhqS6HfGI8YIyH1BOotxHEpbJdhCZtdz2so2OiwpUzrxxTYJOHEJ2CgV6IeLQo6EsGEU6NCMhplDBUVgadabTb1WjXt2UNetWBVS7eiwlti
vZslXPooWqdi3Sr2HdFtMl9+3UukjvPIiL19cEvn0pBQk89C7hnYYPt9wKmDBjxVb/Ql48YeHkIj0bWL6MDB7ngxE+x5woeh6+0uhCo05Nd/UNva1df2zrOrNG2d7Cyf6KVbTt2Kg17NbQGC2H1bz3loYNkvOHzXVDFGf6O8P0odWJE2YO4mfX5Byu62MR1CsSsMUFAMhOY4CUn+qnnJ+vIQEAIfkEBQYABQAsJwAhADwAVQAAA/9Iutz+MMpJq7046827/2AojmRpnmiqrmzrvnAsz3Rt3yUQ7AGANwKecPAj6ITCByDYaw2QSN/iiIRQm50rVDCF8hzaHVECAIwZTO9uod5JFeGvNcpoy+M8Lrv9RnvfdmsEaWpddmB8CoEBioGGbYhqUos+iwuEXg6YVQSUnY6Nhw2gnpahkKOipaB4SaltRKuirXJ1dpWBArRuT6a2bbqLwsOCv8THyDt6xsnNnq/O0YnQ0tVCfafW2tif2t7c3uHg4dsO5N7m59Zn2erN2O7W1PHJffTVzPfIgPrRe/3N/gHc126gsEkGCXZLOAwhw4YFH0qKKBGKlE0VoYzBmNFhlZGOoj6CLARnpJdlvUxey2fSnkqPCjh2bLDr4biXD14GWMagpkF2LCXypGlywkhuRDMijfRwqASZ9JxOgKpu6QSf5ThgjWYVw9ZjXTeU0aZLxYCxx3SFRXG2jNu3QDkkAAAh+QQFBgAFACwoADQAOgBCAAAD/0i63P4wykmrvTjrzbuPgBCMwJcBaOmIYxuAqEkALsnUrsrQuYe3Kt7vxWDVdKfhSKG0KYQ45KUZAAyoggVVkhrssNDhc/sIV8dUMw6tlBKMxze1OlfAf1J1S56uE+Z5TSGAfoANaXc/iTgCizVeN3OSk36RlJeYS4eZnJMOnaBklqGkLp+lqJqjqaGnrKSur6BSsqRZq7WZm7mdt0y8oLjAlDrDwb/GmZDJnCrMmc7Pl7fS08jVc9TYktrbWNfeQ9HhbeDkcX/n4nbqPwt65+/tawpX80DC50j3e1/3vmzaueFXZp4bc95AqIMEQd8EeMYOOoDIS2JBaRYhUGSVUR7hsI4UNnISwPCDyEkgOaAYaUWGhRQhGsns4rKmzQQAIfkEBQYABAAsKAA2ADoAQAAAA/9Iutz+DEhIq71LhL0F/tcwOQBnbpA0go9mesx5Ak05s43M0YSro4tfgIcTAo0bnu2HIyx/EmQAhiQ6BgOHdLglDKStm2LrMzrBEaiX3H3+skFhdNvtLtwnAZ6u+6LHVXxSezIxSGWCiSaGio18jI6RQjCAkpYylASXmyZWnJ+en5uhopaZpZuQqI5Eq5dxrpE8sZaVtI1Zt7Kauo68vYq/wIKEw0bFxlDJxMt8yM2d0FvP0lzVx4jXndTQZ9o63t8n4eIctuWz5Tvn4nfqUwt+6KraNfM57Q7ZzVb0y/1pqlkQeKHbh2SZMOxbBRADN0kJmzxMJABOExKgLjqcuE0Ko0YRKkKqsOgxAQAh+QQFBgAEACwoADcAOgA/AAADlEi63P6QjEgrBPbhBkQIwpaNwaSYUMAAaiOMDhq10SvNcJ61tKb/qRulxxHBbJaBESITAnPLy3OqiPqoT6tDu8VyMk3vCEkJ48TotHr9JLKp7ndbLibT53d4fs/v+/+AgYFcgoWGh4iJiouMjY6PkIxxi3aMZpGYmZqbnIY0hJ2hfpeihaSlqKmqQJOrrq+wFqCgGQkAIfkEBQYABQAsKAA2ADoAQAAAA6RYutz+bMBJq23AkcurhN8TOgIwdkzGCJvSXqeiou70CdU84XTv/8AghBCzBISNVy2os/WKSAgvWoBSg9OrdntRVrLcsHhMLpvP6HQarG673/B42CpXsOtJvH7PJ9P7gIGCg4SFhhR3egNNfF6Hj3KJkHiSk2sLf5aaZI59jG9FnZtilQWlo6gWnxSrqTStcgCiPqcXtVg0t1qzCgMEv8DBwsPACQAh+QQFBgABACw4ADcAKQ
A+AAACOYyPmaDtD6OctNqLs968+w+G4kiW5omm6sq27gvH8kzX9o3n+s73/g8MCh2DofGITCqNgqXzSXQVAAAh+QQFEgABACwrADYAKAAMAAADIBi63P4BwElrEzbrzbtXEkd8ZGkG42mFatsOLeZaspYAACH5BAUGAAMALFAAPwABAAQAAAIDhBIFACH5BAUGAAQALCgAOgA6ADwAAANvSLrc/nAFMaO9OIeQu+/bJ45NSJ6fia6Xyr4OB890bd94ru8PwK+yHyngE4o2RaNnE1SCmk6MK9qCUi2BwVWT3EaIXkw3/BCQz+jbOM1uu9/wuHxOr9th2rt+z8et2Xl9goOEhYaHiImKi19XgTkJACH5BAUGAAUALCkAOgA+ADoAAAOpWLrc/lABQqO9OLN6gddgSAxYYAZAqFpDep0muc5MW8IuTQ/Cfea6FTACMwVpQ0gxcJz1fL+m1LFkTq+FahJLqz65zSpYqh0fy+bgcpsGrdu6N3y2lM1Vyzsdp18V7X0aRYF4UYQhJ4eFX4oZJmyND0aRgpCUDQGAl0SbnZ6fnZqgG6OlpkGMpwWWqqcErbAZr6CzramnoqW3prWxvr/AGrvBxMWgrIJBCQAh+QQFBgAFACwtADsARgA4AAADyli63P4wMkKlvTg/yrX/H9eBZOkAImGuJiqy8OeOcW0Nqa1HaXUCwMFu1VMxAIGkEjAsFY0FpTTJbIaKiqnWeu0VkFop1/OMhsXjTHFwllbTF2ybClenBObzu25JVdt7fBIib2d4ghgue2eIGgCHWWGBjRqSlCVhlySWmh9ak50WmaEeW6SlbqeVqaoZU62rSbCuS7MYtbYXSrm3Aby3oL8NwcLFjsbITsnLx8zOz83QBVDPxNLXzNaJmpAr3djg4WPfywI4T+jpFAkAIfkEBQYABQAsMQBCAEUAMAAAA6JYutz+MEpGap04a2w72WDIeZ1ongVJAMDwoXDmAUFtC0OsP55g/7UBcEgsGo0ClecY8DGfUGJSSRA+d1gLDYrVaaOBLuwb3YLPT5ITChCLOla22wSomJmCuX5x2+9tbX5zgIJ6NYGFYoeJbouMXY6PWAGIkjqUlpOVmSiYnDqbn3SiMXmknaepqqsbOayvsLGyMrO1tre4ubq7vL2+v7OuvwkAIfkEBQYABQAsKQBFAE0ALwAAA3tYutz+MMrJiKU4610IAEIhfARnnhgACSXqvqKkwrRGhHVO4c+g/xMecJgRngLE3wwT8CVpt0xg+awuptbs1am1IrtZI5goHpvP6LR6zW673/C4fN7+0u9gKn7/bPH/RHocgoCFhkWHiYqLjHFcjZCRboSSlZZmlFqPfwkAIfkEBQYABQAsLgBFAEgAMAAAA5tYutz+MEoIKpg46w2C90VQEFtpYsP3AOTpvh0IEe1rZ7EYsXevXA+ZhOe7ARtCCa3IXCR3y2ZRoJuwotLT8fkQ0GrZU0BAxWHD6IX3nEZf2/DCO56e08P2uzSvb7L7RX+APUSDTIWGPl6JfoyOj5BoA5GUjFWVWpiam5wMk52geKEaYKMQAqapqnClq659R6+ysy+ttLe4GrGdCQAh+QQFBgAFACwyAEUAQwAwAAADeVi63P4wygWmvfgCkbvv1SeODyeZZAqhEqG+TDgJLvzKE25juh4Btd1oENAEhR8fRIm8BIoWYHNKoVqv1AG2qd16v+CwgynusMqjI3odJrPf8Lic5J43znZJNz/jX/Z+K4E5g4WGh4hlgIl4iY6PkJGSYI2TlpBqgQkAIfkEBRIAAQAsWgBtAAEAAQAAAgJEAQAh+QQFBgAFACxQADcADwA4AAADPli6AzArKhBCqQHIoneO3XZB4iaUJYFGg7qC73K+ZOzGeK7v+8yvoZ9wSCwaj8ikcslsOp/QaPQmfQIIWGwCACH5BAUGAAUALCkANgA2AEAAAAODWLrc/jACIMCIOOsGgv/CJo6LEDgeqWIdFABr3JwRLd+YjZPE7juXn1AUHG
J6mqIRMoBlkMtIKHpzUq/YrPYx3Y663o01rBmTMcpzJK1uu9/wuHxOr9vv+Lx+z+/7/4CBgoOEN2CFiImBZopXjI2QOIeRlJCTP2w8lSV0AASfoKGiowkAIfkEBQYABQAsOwA1ACQAQAAAA1NYutz+AIxHqxsiaA2sd0DgBN1nilRpriPrvnAsL2pMfDU8efnsV7efcEgsGo9IVi/JbDqf0Kh0Sq1ar9isdsvter/gsHhMLpvP4h167V2yP4JnAgAh+QQFBgAFACwoADMANwBDAAADbli63P4wriGrvQ2EvQH+oKJxnBee0dh0aLtCmyubMC3fjo1jQqXvuB9wSCwaGwQL5RhJVpzMxzJ6Igiplx52y+16v+CweEwum8/otHrNbrvf8Lh8Tq/b7/h8+arv+/+AgYKDhBBQhYiJiouBh0QJACH5BAUGAAUALFAAKwAPABsAAANCSLoLwFCJQMOLpOrLtOcE4GkCI46ViabLtAbdCy/yrNQNrpxv/pY7Gac2FNKEg5sxVEsyZb6eEuqowayUjLWEfSQAACH5BAUGAAUALFAAIgAQACEAAANTWLo8w3CBQAOIstaLtX7Q5FEcI4xVhKbQSqkuNLhC+C5b2Fl6VxaEmo9BwEiIRgVIUUzKnD2o4uckUJNBKVO75Ta112TYOG4Ay4vH16nWtrVZaQIAIfkEBQYABQAsUAAeABEAJQAAA01YujwALDIRaoBy2Z0V2JbQUWDVmRoqBWswdBkLrxk2M/bt6XxG9IofUAgs5nTHm6g4LBJ5LyYwOaPCllLdU8vDRG/WDiaM2yGLIjIjAQAh+QQFBgABACxVAB4ADAAQAAADFxiqNLQwgCUeFDHrqbv/gQWOZGme5AAmACH5BAUGAAEALFYAHgAKABsAAAMkSLrM8G2EOceiWAFMN/9gIIVkaQomiIZe6r7hCBJwbZcrCBAJACH5BAUSAAAALFAAIAAQAB0AAAMaCLpKDO7JSau9OOvNu+hgKI5kaZ5oqoLDlgAAIfkEBQYABQAsQwAfAB0AIgAAA0BYutyupMRHq1i36s27/xgojmRpnmh6AmrrvnAsz3Q9ACwKBDyfkYJAg5cDFR1C4+bISVacNdOkNVUNXldXtpMAACH5BAUGAAUALEIANAAPAA8AAAMmSEoj8QGsCaANk9RrJ7+S9llCYZ4oGqRsOrRwLM90IY5dbesEDiYAIfkEBQYABAAsQgAzAA4AEAAAAyJIqhAeYK32qhw1h6VzlOAihOS2kWiqrmzrrlRXxbIj1FUCACH5BAUGAAUALEIAMQAOABIAAAMqWKogEQCs+eoTs1QasHrZAhVAEE6SdyqYuVJvLMfEXNTzYO98L5eWYCUBACH5BAUGAAUALEIAKQAOABoAAAM+SLo0/ASEOQGUlNqFM2WetxGhpwxl5qBp1bRul1pyScOBEOE33Le/2c43BBaFtVDQdmQmRYNnBhDFBQACawIAIfkEBQYABQAsQgAfAA4AJAAAA09IugTAkIQ5X6TYLoAx7NkygJTGkYGwodPKuihMyiDd2Z5yxjob4BRgS+ELWIpH34CoVAh8i+eLOZVAqaillaUtdptbVJI1FmNJZbSjGEgAACH5BAUGAAUALEMAGQANACoAAANXSEogEWBJ8GqQyta4qH7T9wydCC2OyBHpt7baap6ZKdvLfKs5XvM/Vw9ImAmGn2NQoyyamrMLYRBVUGfWKkPrMSm6oi93jN2WwR/xmew1t9EadRsmjD4SACH5BAUGAAEALDgAGQARACEAAAIZjI85kO0Po5y02ouz3rz7D4biSJbmCQlXAQAh+QQFBgAFACw0ABkAGwAnAAADO1i63EwFAEdrGW3azbv/HQSOZGmeaKqubOu+cCzPdG3feK47WAoEQKCGJAg0gESL8dOzDDkC6M4lWiUAACH5BAUGAAUALDUAGQAZACQAAAM8WLrcTJC46Qhgg+pym9ze1IFkaZ5oqq5s675wLM90bd94ru98vwCCgD
AwMgECjYDgdKQsS0hNBlTERTcJACH5BAUGAAEALDgAGQAWACIAAAMmGLrcOsPJqQC9OCuhu/9gKI5kaZ5oqq5s675wLI+DtZn2mItElgAAIfkEBQYAAQAsOAAZABYAHwAAAyAYutwKwMlJqxs26827/2AojmRpnmiqrmzrvjBIMAC2JQAh+QQFBgAFACw1ABkAGgAqAAADR1i63E3EyamEApFql7cHXiiGw1iAZqqubOu+cCzPdG3feK7vfAj8KJUgQCyWTMVFMeghSpykACXApFQd11yWYel5v+DCUZMAACH5BAUGAAUALDUAMwAOABAAAAMgWLqzviBICV6JMztsg1iBBSqVCF1mqq7syX5t3MLySiQAIfkEBQYABQAsNAAzAA4ADwAAAx9YqhAuYK0mSxCSVrbC7srwZWNpnuiHpWeEui3xpkICACH5BAUGAAUALDQAMQAOABIAAAMtSKoA0iuSQGsA0mqshNbLBxKDaA0EYFaougbdC0+yIs+3XdMvemO/kqxxEyQAACH5BAUGAAUALDMAKgASABoAAANUSLoz+1CBQAOIcFZ6sdpbF2lgIHhCWUEOoVYi0b4r89HB0+H5suO62+vkE76CLmAxSUPynEqJUdUi/Gii6ys7LXGZW9+FRySkwMebmecYcHiUVCABACH5BAUGAAUALDQAIQARACMAAANjWFqz/gqEGRpcks67NBVc4VFcNgKX6aGQqrGPq6UjZTmmM8HY9gSgxyDACTqGpQsxFUoRmrEnpgmQQhfWayF75UK9TXDotjC2IObYucTlRdXFdrhccD+sILtDHuKz61oFAhkJACH5BAUGAAUALDQAHwAMACEAAANAWAXRoLCIRh8cNEcWYrBLFxXiMioleK7RwL4wq64AAQs2nL/2fvor4MkVK8ZmJ+RIYJTEmCybUiF9EZvYzfGZAAAh+QQFBgABACw0ACAAAgACAAADA0hAkQAh+QQFBgABACw0AB8ABwAOAAACDIwVMMbtD6OctMLlCgAh+QQFBgABACwpAB8AFAAhAAADJxi6vARgkEbXaLPqXS//jQCOZGmeaKqubOu+cCzPdG3f2+CRogmkCQAh+QQFBgAFACwqAB8AEgAhAAADJli6vDSkSRUXmE1kTLn/YCiOZGmeaKqubOu+cCzP5QDcIBCwGpYAACH5BAUGAAUALCgAHwAUACIAAAMjWLrcRS4yIWsENuvNu/9gKI5kaZ5oqq5s674qpgxwFFQ0mAAAIfkEBQYAAwAsKAAgABUAIgAAAiWcjwjJPeuinBHSi7PevPsPhuJIluaJpuqaLkEgWFxACZ2s2V0BACH5BAUGAAAALCkAPQAHAAMAAAIGhIMByR0FACH5BAUGAAEALCkAPAALAAYAAAIMjDNzK6y4Foi02ioKACH5BAUGAAMALCgAPAALAAgAAAISnBEZYKYfHFQCTWXv0knw4HEFACH5BAUGAAIALCgAOwALAAgAAAIQlBEZYKYfHFQATXpxXpvvAgAh+QQFBgADACwoADoADAAJAAACE5wRGWCdPZaDkDHaBN68e351QgEAIfkEBQYAAgAsKAA5AAwACwAAAhWUERlgrc/Mm2a0i3OLuvsPhgIylQUAIfkEBQYABAAsKAA4AAwACwAAAxhICtEyKrZIApAUE5jjvR3FhWRpmmMpnAkAIfkEBQYABQAsKAA3AAwADQAAAx1YCtGisDTYBo23WIwBh8THeWIJPaaCikRgtmkqJAAh+QQFBgAFACwoADYADAAOAAADIkgE0QEqCkcjow6SiZ3qFQF64hh8JjoO5QiZTwsKMlh3QAIAIfkEBQYABQAsJwAvAA0AGAAAAzdIBNGgUIlGQ1yVishyix4VTSEXWlDnmeGjqJmLtemJzuVr16cLx7pe8DTM/YDHzCCptFEGzkYCACH5BAUGAAUALCYALAAOAB0AAANFSATRoBCy1kSENL+bqeVdM0RD6JBmsGHmqqQuAYMmHdodrkWpGgkzCD
AVmdQkvWJvZQwxl8ikoumc9iiLayOrJZS632sCACH5BAUGAAUALCYAKwAMABUAAAMmWDoA+i9EB2WlBdenI4cQoXSPWJjgmYLD6r5wIcR03db4Ouc8NCQAIfkEBQYAAQAsKAArAAgACQAAAwwYNBMe4MlJq70YkwQAIfkEBQYAAQAsJgArAAoADAAAAxdIuszTCoQZmKRzYQrUnh3xjWRpniOQAAAh+QQFGAABACwnACwACgAOAAADEBhK2q7gPdGovDjrzbv/SwIAOw==" alt="" data-atf="1"></center>
<h1><center>STAY HOME. SAVE LIVES.</center></h1>
<h4><center>STAY home</center></h4>
<h4><center>KEEP a safe distance</center></h4>
<h4><center>WASH hands often</center></h4>
<h4><center>COVER your cough</center></h4>
<h4><center>SICK? Call the helpline</center></h4>
```
#Importing Libraries and Modules
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
from IPython.core.display import display, HTML
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import folium
import plotly.graph_objects as go
import seaborn as sns
import ipywidgets as widgets
```
# Importing Raw Data
```
# Collecting data: pull the latest Johns Hopkins CSSE COVID-19 tables from GitHub.
# Each *_df is a time series (one row per region, one column per date);
# country_df is a per-country snapshot of current case counts.
death_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
confirmed_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
recovered_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
country_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv')
```
# Death Cases
```
# Preview the first rows of the global deaths time series.
death_df.head()
```
# Confirmed Cases
```
# Preview the first rows of the global confirmed-cases time series.
confirmed_df.head()
```
# Recovered Cases
```
# Preview the first rows of the global recovered-cases time series.
recovered_df.head()
```
# Cases According To The Countries
```
# Preview the per-country snapshot table.
country_df.head()
# Manipulating data: lower-case every column name so the date columns and
# metadata columns have a uniform casing across all four tables.
country_df.columns = map(str.lower, country_df.columns)
confirmed_df.columns = map(str.lower, confirmed_df.columns)
death_df.columns = map(str.lower, death_df.columns)
recovered_df.columns = map(str.lower, recovered_df.columns)
# Harmonise the region/country column names across the tables.
confirmed_df = confirmed_df.rename(columns={'province/state': 'state', 'country/region': 'country'})
recovered_df = recovered_df.rename(columns={'province/state': 'state', 'country/region': 'country'})
death_df = death_df.rename(columns={'province/state': 'state', 'country/region': 'country'})
country_df = country_df.rename(columns={'country_region': 'country'})
# Top 20 countries by confirmed cases, used by all the charts below.
sorted_country_df = country_df.sort_values("confirmed",ascending=False).head(20)
```
# Countries with Highest Rates
```
def highlight_col(x):
    """Build a CSS style frame for DataFrame.style.apply(..., axis=None).

    Returns a frame shaped like ``x`` whose cells hold background-color
    rules for columns 0, 4, 5, 6 and 7 and empty strings elsewhere.
    """
    column_colors = {
        0: 'background-color:#abbaab',
        4: 'background-color:#667db6',
        5: 'background-color:#e52d27',
        6: 'background-color:#52c234',
        7: 'background-color:#11998e',
    }
    styles = pd.DataFrame('', index=x.index, columns=x.columns)
    for position, css in column_colors.items():
        styles.iloc[:, position] = css
    return styles
# Render the top-20 table with the per-column highlighting defined above.
sorted_country_df.style.apply(highlight_col ,axis=None)
# Bubble chart of confirmed cases (sorted_country_df only has 20 rows,
# so head(25) takes all of them).
fig = px.scatter(sorted_country_df.head(25),x="country" ,y="confirmed" ,size="confirmed" ,color="country" ,hover_name="country" ,size_max=70)
```
# Graphical Format
```
# Display the bubble chart built in the previous cell.
fig.show()
def plot_cases_for_country(country):
    """Plot cumulative confirmed cases and deaths over time for one country.

    Passing 'World' (or 'world') aggregates every row of the JHU tables;
    any other value filters rows whose 'country' column matches exactly.
    """
    series_specs = [
        ('confirmed', '#667db6', 4, confirmed_df),
        ('deaths', '#e52d27', 5, death_df),
    ]
    fig = go.Figure()
    for label, color, width, frame in series_specs:
        # Date columns start at index 5 in the JHU time-series layout.
        x_data = np.array(list(frame.iloc[:, 5:].columns))
        if country in ('World', 'world'):
            rows = frame
        else:
            rows = frame[frame['country'] == country]
        y_data = np.sum(np.asarray(rows.iloc[:, 5:]), axis=0)
        fig.add_trace(go.Scatter(
            x=x_data, y=y_data, mode="lines+markers", name=label,
            line=dict(color=color, width=width),
            connectgaps=True,
            text='Total' + str(label) + ": " + str(y_data[-1]),
        ))
    fig.show()
```
# Enter Country to View the Stats
```
# Interactive widget: type a country name (or 'World') to redraw the chart.
interact(plot_cases_for_country, country="World");
```
# World Map
```
# Interactive world map: one circle per JHU region, radius ~ log(confirmed).
world_map = folium.Map(location=[11,0], tiles="cartodbpositron", zoom_start=2, max_zoom=5, min_zoom=2)
for i in range(len(confirmed_df)):
    # NOTE(review): indexing death_df by the same i assumes its rows align
    # 1:1 with confirmed_df — TODO confirm against the source data.
    folium.Circle(
        location=[confirmed_df.iloc[i]['lat'], confirmed_df.iloc[i]['long']],
        fill = True,
        # +1.001 keeps log() defined for zero-case rows; *50000 scales to metres.
        radius = (int((np.log(confirmed_df.iloc[i,-1]+1.001))) + 0.7)*50000,
        fill_color= 'blue',
        color = 'red',
        # The "</h4>" literal joins the following line via implicit
        # adjacent-string concatenation (there is no '+' after it).
        tooltip = "<div style='margin: 0; background-color: #2193b0; color: #200122;'>"+
                  "<h4 style='text-align:center;font-weight: bold'>"+confirmed_df.iloc[i]['country'] + "</h4>"
                  "<hr style='margin:10px;color: white;'>"+
                  "<ul style='color: white;;list-style-type:circle;align-item:left;padding-left:20px;padding-right:20px'>"+
                  "<li>Confirmed: "+str(confirmed_df.iloc[i,-1])+"</li>"+
                  "<li>Deaths: "+str(death_df.iloc[i,-1])+"</li>"+
                  "<li>Death Rate: "+ str(np.round(death_df.iloc[i,-1]/(confirmed_df.iloc[i,-1]+1.00001)*100,2))+ "</li>"+
                  "</ul></div>",
        ).add_to(world_map)
world_map
```
# Top 10 Countries with Affected Cases
```
# Bar chart of deaths for the 10 countries with the most confirmed cases.
# NOTE(review): the title says "Affected Cases" but y plots deaths — confirm intent.
px.bar(
    sorted_country_df.head(10),
    x = "country",
    y = "deaths",
    title= "Affected Cases",
    color_discrete_sequence=["#fc466b"],
    height=750,
    width=1000
)
```
## Top 10 Countries with Recovered Cases
```
# Bar chart of recovered cases for the top-10 countries by confirmed cases.
px.bar(
    sorted_country_df.head(10),
    x = "country",
    y = "recovered",
    title= "Recovered Cases",
    color_discrete_sequence=["#56ab2f"],
    height=750,
    width=1000
)
```
## Top 10 Countries with Confirmed Cases
```
# Bar chart of confirmed cases for the top-10 countries.
px.bar(
    sorted_country_df.head(10),
    x = "country",
    y = "confirmed",
    title= "Confirmed Cases",
    color_discrete_sequence=["#8e2de2"],
    height=750,
    width=1000
)
```
## Resources
- https://en.wikipedia.org/wiki/2019%E2%80%9320_coronavirus_pandemic
- https://www.who.int/emergencies/diseases/novel-coronavirus-2019
- https://www.worldometers.info/coronavirus/
- https://github.com/CSSEGISandData/COVID-19
| github_jupyter |
#EOSC 582 Assignment V (SSMI)
```
__author__ = 'Yingkai (Kyle) Sha'
__email__ = 'yingkai@eos.ubc.ca'
import h5py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
% matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
Function for histogram.
```
def hist_SSMI(CWV_unfixed, CWV_both, CWV_19, CWL_unfixed, CWL_both, CWL_19):
    """Overlay histograms comparing unfixed vs. fixed retrievals.

    Left panel: column water vapour (CWV, 0-70 in steps of 1);
    right panel: column liquid water (CWL, 0-0.7 in steps of 0.01).
    NaN samples are dropped before histogramming.
    """
    # Flatten the 2-D retrieval maps to 1-D samples.
    CWV_unfixed = CWV_unfixed.flatten(); CWV_both = CWV_both.flatten(); CWV_19 = CWV_19.flatten()
    CWL_unfixed = CWL_unfixed.flatten(); CWL_both = CWL_both.flatten(); CWL_19 = CWL_19.flatten()
    binCWV = np.arange(0, 70+1, 1); binCWL = np.arange(0, 0.7+0.01, 0.01)
    fig = plt.figure(figsize=(16, 4))
    ax1=plt.subplot2grid((1, 2), (0, 0), colspan=1, rowspan=1)
    ax2=plt.subplot2grid((1, 2), (0, 1), colspan=1, rowspan=1)
    ax1.hist(CWV_unfixed[~np.isnan(CWV_unfixed)], binCWV, color='y', linewidth=2.5, histtype='step', label='unfixed');
    ax1.hist(CWV_both[~np.isnan(CWV_both)], binCWV, color='b', linewidth=2.5, histtype='step', label='fixed: both 19, 37 GHz');
    ax1.hist(CWV_19[~np.isnan(CWV_19)], binCWV, color='r', linewidth=2.5, histtype='step', label='fixed: 19 GHz only');
    ax1.legend(); ax1.grid(); ax1.set_xlabel('CWV', fontsize=12)
    ax1.set_title('(a) unfixed v.s. fixed CWV Histogram', fontsize=12, fontweight='bold')
    ax2.hist(CWL_unfixed[~np.isnan(CWL_unfixed)], binCWL, color='y', linewidth=2.5, histtype='step', label='unfixed');
    ax2.hist(CWL_both[~np.isnan(CWL_both)], binCWL, color='b', linewidth=2.5, histtype='step', label='fixed: both 19, 37 GHz');
    ax2.hist(CWL_19[~np.isnan(CWL_19)], binCWL, color='r', linewidth=2.5, histtype='step', label='fixed: 19 GHz only');
    ax2.legend(); ax2.grid(); ax2.set_xlabel('CWL', fontsize=12)
    ax2.set_title('(b) unfixed v.s. fixed CWL Histogram', fontsize=12, fontweight='bold')
```
Function for maps
```
def single_map(ax):
    """Draw a Mollweide-projection base map (coastlines + graticule) on ax.

    Returns the Basemap instance so callers can project lon/lat and plot on it.
    """
    proj = Basemap(projection='moll', lon_0=180, resolution='c', ax=ax)
    proj.drawcoastlines()
    proj.drawmeridians(np.arange(0, 360, 60));
    proj.drawparallels(np.arange(-90, 90, 30));
    return proj
def SSMI_map(lon, lat, CWV_unfixed, CWV_both, CWV_19, CWL_unfixed, CWL_both, CWL_19):
    """Draw a 3x2 grid of global maps: CWV (left column) and CWL (right column),
    each showing the unfixed, both-channel-fixed and 19GHz-only-fixed retrievals.
    One shared horizontal colorbar per column.
    """
    levCWV = np.arange(0, 80+5, 5); levCWL = np.arange(0, 0.7+0.07, 0.07)
    fig = plt.figure(figsize=(12, 8))
    ax1=plt.subplot2grid((3, 2), (0, 0), colspan=1, rowspan=1); ax2=plt.subplot2grid((3, 2), (1, 0), colspan=1, rowspan=1)
    ax3=plt.subplot2grid((3, 2), (2, 0), colspan=1, rowspan=1); ax4=plt.subplot2grid((3, 2), (0, 1), colspan=1, rowspan=1)
    ax5=plt.subplot2grid((3, 2), (1, 1), colspan=1, rowspan=1); ax6=plt.subplot2grid((3, 2), (2, 1), colspan=1, rowspan=1)
    # --- CWV column ---
    proj=single_map(ax1); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWV_unfixed, levCWV, cmap=plt.cm.RdYlGn, extend='max')
    ax1.set_title('(a.1) CWV unfixed (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    proj=single_map(ax2); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWV_both, levCWV, cmap=plt.cm.RdYlGn, extend='max')
    ax2.set_title('(a.2) CWV fixed: both (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    proj=single_map(ax3); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWV_19, levCWV, cmap=plt.cm.RdYlGn, extend='max')
    ax3.set_title('(a.3) CWV fixed: 19 GHz only (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    # Shared colorbar for the CWV column (uses the last CWV contour set).
    cax = fig.add_axes([0.175, 0.05, 0.25, 0.02])
    CBar = fig.colorbar(CS, cax=cax, orientation='horizontal')
    CBar.ax.tick_params(axis='x', length=12.5)
    CBar.set_label('CWV $\mathrm{kg/m^2}$', fontsize=12)
    # --- CWL column ---
    proj=single_map(ax4); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWL_unfixed, levCWL, cmap=plt.cm.gist_ncar_r, extend='max')
    ax4.set_title('(b.1) CWL unfixed (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    proj=single_map(ax5); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWL_both, levCWL, cmap=plt.cm.gist_ncar_r, extend='max')
    ax5.set_title('(b.2) CWL fixed: both (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    proj=single_map(ax6); x, y = proj(lon, lat)
    CS = proj.contourf(x, y, CWL_19, levCWL, cmap=plt.cm.gist_ncar_r, extend='max')
    ax6.set_title('(b.3) CWL fixed: 19 GHz only (Jan 1990)', fontsize=12, fontweight='bold', y = 1.025)
    # Shared colorbar for the CWL column.
    cax = fig.add_axes([0.6, 0.05, 0.25, 0.02])
    CBar = fig.colorbar(CS, cax=cax, orientation='horizontal')
    CBar.ax.tick_params(axis='x', length=12.5)
    CBar.set_label('CWL', fontsize=12)
```
# Retrieval functions
SSMI.py includes functions that calculate the emissivity and absorption coefficients at the 19 and 37 GHz SSMI channels.
The code is a Python port of <a href='http://www.aos.wisc.edu/~tristan/aos740.php'>**UW-Madison AOS-704**</a>'s FORTRAN77 code.
```
# Make the course-provided SSMI helper module importable; it supplies
# emiss() and coef() used by the retrieval below.
import site
site.addsitedir('_libs')
from SSMI import *
```
Approximation of windspeed and main retrieval function in Greenwald et al., 1993.
```
# Empirical wind-speed regression (coefficients from the course material).
def wind_speed(sst, t19v, t22v, t37h, t37v):
    """Estimate surface wind speed from SSM/I brightness temperatures.

    Parameters
    ----------
    sst : array_like
        Sea surface temperature (K).  NOTE: not used by the regression
        below; the parameter is kept so existing call sites keep working.
    t19v, t22v, t37h, t37v : array_like
        Brightness temperatures (K) of the 19V, 22V, 37H and 37V channels.

    Returns
    -------
    Wind speed (m/s), elementwise, same shape as the inputs.
    """
    speed = 1.0969*t19v - 0.4555*t22v - 1.76*t37v + 0.786*t37h + 147.9
    return speed
# Retrieval, based on the EOSC 582 website.
def SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5, correction='both'):
    '''
    Use 4 SSMI channel brightness temperatures to retrieve total precipitable
    water (CWV) and liquid water path (CWL).
    =========================================================================
    CWV, CWL = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5)
    -------------------------------------------------------------------------
    Input:
        SST: Sea Surface Temperature (K), 2-D array
        theta: Incidence angle (degrees)
        T#H: Brightness temperature in #GHz band with horizontal polarization
        T#V: Brightness temperature in #GHz band with vertical polarization
        iter_num: = 0 means no correction, > 0 applies correction to CWV > 25kg/m^2
        correction: 'both' corrects both 19 and 37 GHz; any other value
                    corrects 19 GHz only
    Output:
        CWV: Total precipitable water
        CWL: Liquid water path
    ==========================================================================
    Author:
        Yingkai (Kyle) Sha
        yingkai@eos.ubc.ca
    '''
    M, N = np.shape(SST)
    # Parameters
    mu = np.cos(theta*np.pi/180.)  # cosine of the incidence angle
    GAMMA = -5.8E-3  # Lapse rate: -5.8 K/km = -5.8E-3 K/m
    Hw = 2.2E3       # Water vapor scaling height: 2.2km
    # Correction for cold bias
    # (Greenwald et al., 1993)
    T37H= T37H + 3.58
    T37V= T37V + 3.58
    # delta T (polarization differences)
    dT19 = T19H - T19V
    dT37 = T37H - T37V
    # Frequency bands (GHz)
    freq = [19, 22, 37, 85]
    # Allocate output arrays
    emissv = np.empty([len(freq), M, N])
    emissh = np.empty([len(freq), M, N])
    KL19 = np.empty([M, N])
    KL37 = np.empty([M, N])
    KV19 = np.empty([M, N])
    KV37 = np.empty([M, N])
    TOX19 = np.empty([M, N])
    TOX37 = np.empty([M, N])
    # Empirical windspeed (37 GHz temperatures passed without the bias shift)
    windspeed = wind_speed(SST, T19V, T22V, T37H-3.58, T37V-3.58)
    # Calculate emissivity and absorption coefficients per grid cell
    # (emiss/coef are provided by the SSMI helper module).
    for m in range(M):
        for n in range(N):
            for i in range(len(freq)):
                emissv[i, m, n], emissh[i, m, n] = emiss(i+1, windspeed[m, n], SST[m, n], theta)
            KL19[m, n], KL37[m, n], KV19[m, n], KV37[m, n], TOX19[m, n], TOX37[m, n] = coef(SST[m, n])
    # Reflectivities (1 - emissivity) at 19/37 GHz, V and H polarization
    R37V=(1.0 - emissv[2, :, :])
    R19V=(1.0 - emissv[0, :, :])
    R37H=(1.0 - emissh[2, :, :])
    R19H=(1.0 - emissh[0, :, :])
    # Iteration correction of F19, F37 for CWV > 25kg/m^2
    # Greenwald et al., 1993) equation (4)
    CWV = np.zeros(SST.shape)
    #CWL = np.zeros(SST.shape)
    # NOTE(review): T019/T037 alias SST (no copy), so the in-place updates in
    # the loop below also modify the caller's SST array — probably unintended;
    # consider SST.copy().
    T019 = SST
    T037 = SST
    for iteration in range(iter_num):
        hit = CWV > 25
        # transmission
        Tau19V = np.exp(-1*KV19*CWV/mu)
        Tau37V = np.exp(-1*KV37*CWV/mu)
        f19 = np.exp(50*KV19/mu)
        f37 = np.exp(50*KV37/mu)
        if iteration > 0:
            # in the first timestep, T019, T037 = SST
            T019[hit] = SST[hit] + GAMMA*Hw*(1-f19[hit]*Tau19V[hit]**2)*TOX19[hit]
            if correction == 'both':
                T037[hit] = SST[hit] + GAMMA*Hw*(1-f37[hit]*Tau37V[hit]**2)*TOX37[hit]
                #T037[hit] = SST[hit]
        # Correction
        F19 = (T19H - T019)/(T19V - T019)
        F37 = (T37H - T037)/(T37V - T037)
        R1 = -1*mu/2.*np.log(dT19/(SST*R19V*(1-F19)*TOX19**2.))
        R2 = -1*mu/2.*np.log(dT37/(SST*R37V*(1-F37)*TOX37**2.))
        # Linear algebra
        # NOTE(review): this rebinds M (originally the row count) to a 2-D
        # determinant array; harmless here because the range(M) loops have
        # finished, but the name should be changed.
        M = KV19*KL37 - KL19*KV37
        CWV = (R1*KL37 - R2*KL19)/M
        #print('iteration step = {}'.format(iteration))
    # get CWL
    CWL = (R2*KV19 - R1*KV37)/M
    return CWV, CWL
# Single-pixel sanity check with the values from the assignment.
theta = 53.1
# Broadcasting: wrap the scalars in 1x1 arrays because the retrieval
# function expects 2-D inputs.
SST = 271.75*np.ones([1, 1])
T19H = 113.57*np.ones([1, 1])
T19V = 183.24*np.ones([1, 1])
T22V = 194.80*np.ones([1, 1])
T37H = 148.13*np.ones([1, 1])
T37V = 208.11*np.ones([1, 1])
SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=4, correction='both')
```
# Full Retrieval
## Jan
```
# Load the January 1990 brightness temperatures and run the three
# retrieval variants (no fix, both channels fixed, 19 GHz only).
TB_obj = h5py.File('_data/bright_temps.h5', 'r')
lat = TB_obj['lat'][:]
lon = TB_obj['lon'][:]
SST = TB_obj['jan/sst'][:]
T19H = TB_obj['jan/t19h'][:]
T19V = TB_obj['jan/t19v'][:]
T22V = TB_obj['jan/t22v'][:]
T37H = TB_obj['jan/t37h'][:]
T37V = TB_obj['jan/t37v'][:]
TB_obj.close()
theta = 53.1
# iter_num=1 effectively applies no iterative correction.
CWV1_unfixed, CWL1_unfixed = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=1)
CWV1_both, CWL1_both = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5, correction='both')
CWV1_19, CWL1_19 = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5, correction='19')
hist_SSMI(CWV1_unfixed, CWV1_both, CWV1_19, CWL1_unfixed, CWL1_both, CWL1_19)
SSMI_map(lon, lat, CWV1_unfixed, CWV1_both, CWV1_19, CWL1_unfixed, CWL1_both, CWL1_19)
```
## Jul
```
# Same retrievals for July 1990 (lat/lon and theta reused from above).
TB_obj = h5py.File('_data/bright_temps.h5', 'r')
SST = TB_obj['july/sst'][:]
T19H = TB_obj['july/t19h'][:]
T19V = TB_obj['july/t19v'][:]
T22V = TB_obj['july/t22v'][:]
T37H = TB_obj['july/t37h'][:]
T37V = TB_obj['july/t37v'][:]
TB_obj.close()
CWV7_unfixed, CWL7_unfixed = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=1)
CWV7_both, CWL7_both = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5, correction='both')
CWV7_19, CWL7_19 = SSMI_retrieval(SST, theta, T19H, T19V, T22V, T37H, T37V, iter_num=5, correction='19')
hist_SSMI(CWV7_unfixed, CWV7_both, CWV7_19, CWL7_unfixed, CWL7_both, CWL7_19)
SSMI_map(lon, lat, CWV7_unfixed, CWV7_both, CWV7_19, CWL7_unfixed, CWL7_both, CWL7_19)
```
## Zonal mean results
```
# Zonal (longitude) means of each retrieval variant; nanmean skips land/missing.
CWV1z_19 = np.nanmean(CWV1_19, 1); CWL1z_19 = np.nanmean(CWL1_19, 1)
CWV7z_19 = np.nanmean(CWV7_19, 1); CWL7z_19 = np.nanmean(CWL7_19, 1)
CWV1z_both = np.nanmean(CWV1_both, 1); CWL1z_both = np.nanmean(CWL1_both, 1)
CWV7z_both = np.nanmean(CWV7_both, 1); CWL7z_both = np.nanmean(CWL7_both, 1)
CWV1z_unfixed = np.nanmean(CWV1_unfixed, 1); CWL1z_unfixed = np.nanmean(CWL1_unfixed, 1)
CWV7z_unfixed = np.nanmean(CWV7_unfixed, 1); CWL7z_unfixed = np.nanmean(CWL7_unfixed, 1)
# 2x2 panel: CWV (top row) and CWL (bottom row), Jan (left) and Jul (right).
fig = plt.figure(figsize=(14, 12))
ax1=plt.subplot2grid((2, 2), (0, 0), colspan=1, rowspan=1)
ax2=plt.subplot2grid((2, 2), (0, 1), colspan=1, rowspan=1)
ax3=plt.subplot2grid((2, 2), (1, 0), colspan=1, rowspan=1)
ax4=plt.subplot2grid((2, 2), (1, 1), colspan=1, rowspan=1)
ax1.plot(lat[:, 0], CWV1z_unfixed, color=[0, 0.2, 0.4], linewidth=3.5, label='Jan unfixed');
ax1.plot(lat[:, 0], CWV1z_both, color=[0, 0.5, 0.7], linewidth=3.5, label='Jan fixed: both');
ax1.plot(lat[:, 0], CWV1z_19, color=[0, 0.8, 1], linewidth=3.5, label='Jan fixed: 19 GHz only');
ax1.grid(); ax1.legend(loc=4); ax1.set_xlim(-90, 90);
ax1.set_title('(a) Zonal mean CWV | Jan', fontsize=12, fontweight='bold')
ax2.plot(lat[:, 0], CWV7z_unfixed, color=[0.4, 0.2, 0], linewidth=3.5, label='Jul unfixed');
ax2.plot(lat[:, 0], CWV7z_both, color=[0.7, 0.5, 0], linewidth=3.5, label='Jul fixed: both');
ax2.plot(lat[:, 0], CWV7z_19, color=[1, 0.8, 0], linewidth=3.5, label='Jul fixed: 19 GHz only');
ax2.grid(); ax2.legend(loc=4); ax2.set_xlim(-90, 90);
ax2.set_title('(b) Zonal mean CWV | Jul', fontsize=12, fontweight='bold')
ax3.plot(lat[:, 0], CWL1z_unfixed, color=[0, 0.2, 0.4], linewidth=3.5, label='Jan unfixed');
ax3.plot(lat[:, 0], CWL1z_both, color=[0, 0.5, 0.7], linewidth=3.5, label='Jan fixed: both');
ax3.plot(lat[:, 0], CWL1z_19, color=[0, 0.8, 1], linewidth=3.5, label='Jan fixed: 19 GHz only');
ax3.grid(); ax3.legend(loc=4); ax3.set_xlim(-90, 90);
ax3.set_title('(c) Zonal mean CWL | Jan', fontsize=12, fontweight='bold')
ax4.plot(lat[:, 0], CWL7z_unfixed, color=[0.4, 0.2, 0], linewidth=3.5, label='Jul unfixed');
ax4.plot(lat[:, 0], CWL7z_both, color=[0.7, 0.5, 0], linewidth=3.5, label='Jul fixed: both');
ax4.plot(lat[:, 0], CWL7z_19, color=[1, 0.8, 0], linewidth=3.5, label='Jul fixed: 19 GHz only');
ax4.grid(); ax4.legend(loc=4); ax4.set_xlim(-90, 90);
ax4.set_title('(d) Zonal mean CWL | Jul', fontsize=12, fontweight='bold')
```
| github_jupyter |
Author: Michael Gygli ([Github](https://github.com/gyglim), [Twitter](https://twitter.com/GygliMichael)), 2016-01-13
# Introduction #
This example demonstrates how to compute *C3D convolutional features* using Lasagne and Theano. C3D can be used as a general video feature and has shown strong performance. You can find more information in the paper [1] or the caffe-based reference implementation [2].
* [1]: D. Tran, L. Bourdev, R. Fergus, L. Torresani, and M. Paluri, Learning Spatiotemporal Features with 3D Convolutional Networks, ICCV 2015, http://vlg.cs.dartmouth.edu/c3d/c3d_video.pdf
* [2]: http://vlg.cs.dartmouth.edu/c3d/
# Preparation steps #
This demo uses the pretrained C3D weights, as well as the c3d module in the Lasagne Recipes modelzoo. Thus, you will need to get the Recipes from github (https://github.com/Lasagne/Recipes) first.
```
# Import models and set path to the Lasagne Recipes modelzoo.
import sys
model_dir='../modelzoo/' # Path to your recipes/modelzoo
sys.path.insert(0,model_dir)
import c3d
import lasagne
import theano
# Download the weights and mean of the model (IPython shell magics).
!wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/c3d/c3d_model.pkl
!wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/c3d/snipplet_mean.npy
# And the classes of Sports1m
!wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/c3d/labels.txt
# Finally, an example snipplet
!wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/c3d/example_snip.npy
# Build model
net = c3d.build_model()
# Set the pretrained weights (takes some time)
c3d.set_weights(net['prob'],'c3d_model.pkl')
# Load the video snipplet and show an example frame
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
snip=np.load('example_snip.npy')
plt.imshow(snip[0,:,:,:])
# Convert the video snipplet to the right format,
# i.e. (nr in batch, channel, frameNr, y, x), and subtract the mean
caffe_snip=c3d.get_snips(snip,image_mean=np.load('snipplet_mean.npy'),start=0, with_mirrored=False)
# Compile prediction function
prediction = lasagne.layers.get_output(net['prob'], deterministic=True)
pred_fn = theano.function([net['input'].input_var], prediction, allow_input_downcast = True);
# Now we can get a prediction
probabilities=pred_fn(caffe_snip).mean(axis=0) # Average over the batch dimension
# Load labels (one Sports1m class name per line)
with open('labels.txt','r') as f:
    class2label=dict(enumerate([name.rstrip('\n') for name in f]))
# Show the most probable classes
print('Top 10 class probabilities:')
for class_id in (-probabilities).argsort()[0:10]:
    print('%20s: %.2f%%' % (class2label[class_id],100*probabilities[class_id]))
```
#### Comparison to C3D reference implementation ####
For this example, the Top 10 probabilities of the original C3D implementation are:
wiffle ball: 29.91%
knife throwing: 13.11%
croquet: 11.27%
disc golf: 5.29%
kickball: 5.18%
rounders: 4.48%
bocce: 3.53%
dodgeball: 2.27%
boomerang: 1.71%
tee ball: 1.39%
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import glob
import os
import matplotlib.pyplot as plt
import shutil
from prediction_utils.util import df_dict_concat, yaml_read, yaml_write
project_dir = "/share/pi/nigam/projects/spfohl/cohorts/admissions/starr_20200523"
os.listdir(os.path.join(project_dir, 'experiments'))
experiment_name = 'baseline_tuning_fold_1_10'
# Find every fold-level evaluation result under the experiment directory.
baseline_files = glob.glob(
    os.path.join(
        project_dir,
        'experiments',
        experiment_name,
        '**',
        'result_df_training_eval.parquet'
    ),
    recursive=True
)
# Key each result frame by its trailing (task, config_filename, fold)
# path components.
baseline_df_dict = {
    tuple(file_name.split('/'))[-4:-1]: pd.read_parquet(file_name)
    for file_name in baseline_files
}
# Stack the per-run frames into one long frame with the key as columns.
baseline_df = df_dict_concat(baseline_df_dict,
    ['task', 'config_filename', 'fold']
)
baseline_df.head()
# Sanity check: every (task, config) pair must have exactly 10 folds.
assert (
    baseline_df
    .groupby(['task', 'config_filename'])
    .agg(num_folds = ('fold', lambda x: len(x.unique())))
    .query('num_folds != 10')
    .shape[0]
) == 0
# Mean validation loss per (config, task), averaged across folds.
mean_performance = (
    pd.DataFrame(
        baseline_df
        .query('metric == "loss" & phase == "val"')
        .groupby(['config_filename', 'task'])
        .agg(performance=('performance', 'mean'))
        .reset_index()
    )
)
# For each task, keep the config with the lowest mean validation loss.
best_model = (
    mean_performance
    .groupby('task')
    .agg(performance=('performance','min'))
    .merge(mean_performance)
)
# mean_performance
# mean_performance = (
#     pd.DataFrame(
#         baseline_df
#         .query('metric == "loss" & phase == "val"')
#         .groupby(['config_filename', 'task'])
#         .agg({'performance': 'mean', 'config_filename': lambda x: x.array[-1], 'task': lambda x: x.array[-1]})
#         .reset_index(drop=True)
#     )
# )
# best_model = pd.DataFrame(mean_performance.groupby(['task']).performance.agg('min')).reset_index().merge(mean_performance)
# best_model
best_model_config_df = best_model[['config_filename', 'task']]
# All fold-level rows for the selected configs.
best_model_performance = baseline_df.merge(best_model_config_df)
best_model_performance[['task', 'config_filename']].drop_duplicates()
best_model_config_df
baseline_df
base_config_path = os.path.join(project_dir, 'experiments', experiment_name, 'config')
selected_config_path = os.path.join(project_dir, 'experiments', experiment_name, 'config', 'selected_models')
# Write each winning config to a new 'selected_models' directory, stamping
# the task name into the config as label_col.
for i, row in best_model_config_df.iterrows():
    the_config = yaml_read(os.path.join(base_config_path, row.task, row.config_filename))
    print(the_config)
    the_config['label_col'] = row.task
    os.makedirs(os.path.join(selected_config_path, row.task), exist_ok=True)
    yaml_write(the_config, os.path.join(selected_config_path, row.task, row.config_filename))
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
def r2(x, y):
    """Coefficient of determination: the squared Pearson correlation of x and y."""
    correlation, _p_value = stats.pearsonr(x, y)
    return correlation ** 2
%matplotlib inline
```
# Preparing Data
```
# Number of test utterances per keyword, used to compute per-utterance averages.
test_counts = {'yes': 256, 'no': 252, 'up': 272, 'down': 253, 'left': 267, 'right': 259, 'on': 246, 'off': 262, 'stop': 249, 'go': 251}
# Model size table: parameter count and multiply count per architecture.
param_counts = {
    'cnn_one_fstride4': {
        'params': 220000,
        'multiplies': 1430000
    },
    'cnn_one_fstride8': {
        'params': 337000,
        'multiplies': 1430000
    },
    'cnn_tpool2': {
        'params': 1090000,
        'multiplies': 103000000
    },
    'cnn_tpool3': {
        'params': 823000,
        'multiplies': 73700000
    },
    'cnn_trad_fpool3': {
        'params': 1370000,
        'multiplies': 125000000
    },
    'google-speech-dataset-compact': {
        'params': 964000,
        'multiplies': 5760000
    },
    'google-speech-dataset-full': {
        'params': 1380000,
        'multiplies': 98800000
    }
}
def get_observations(fname):
    """Parse an experiment output file into a column-oriented dict of observations.

    The file holds 7 groups (one per model) of 10 data lines (one per keyword),
    each group followed by 6 lines that are skipped.  A data line contains:
    model keyword accuracy time total_energy peak_power.

    Values are normalised: accuracy to percent, seconds to ms, energy to mJ
    with the 1.9 W idle baseline removed, and time/energy averaged per test
    utterance via the module-level test_counts table; params/multiplies come
    from param_counts.
    """
    observations = {'model': [], 'keyword': [], 'accuracy': [], 'time': [], 'total_energy': [], 'peak_power': [], 'params': [], 'multiplies': []}
    with open(fname, 'r') as f:
        for _ in range(7):
            for i in range(10):
                line = f.readline().rstrip()
                parts = line.split(' ')
                model, keyword, accuracy, time, total_energy, peak_power = parts
                # BUG FIX: the original rstrip('\.onnx') strips a *character
                # set* (any trailing '\', '.', 'o', 'n', 'x'), not the suffix;
                # it only worked because the model names end in digits.
                if model.endswith('.onnx'):
                    model = model[:-len('.onnx')]
                accuracy, time, total_energy, peak_power = list(map(float, [accuracy, time, total_energy, peak_power]))
                accuracy *= 100                                  # fraction -> percent
                total_energy = 1000 * (total_energy - 1.9*time)  # subtract idle draw, J -> mJ
                time *= 1000                                     # s -> ms
                peak_power -= 1.9                                # subtract idle power (W)
                observations['model'].append(model)
                observations['keyword'].append(keyword)
                observations['accuracy'].append(accuracy)
                observations['time'].append(time / test_counts[keyword])
                observations['total_energy'].append(total_energy / test_counts[keyword])
                observations['peak_power'].append(peak_power)
                observations['params'].append(param_counts[model]['params'])
                observations['multiplies'].append(param_counts[model]['multiplies'])
            # Skip the 6 separator/summary lines between groups.
            for i in range(6):
                line = f.readline()
    return observations
# End-to-end measurements (preprocessing + inference).
df = pd.DataFrame(get_observations('experiment_output_e2e.txt'))
df.head()
# Preprocessing-only measurements, used below to isolate inference cost.
df_pre = pd.DataFrame(get_observations('experiment_output_preprocessing.txt'))
df_pre.head()
```
# Analysis
```
# Columns to average per model.
metric_cols = ['accuracy', 'total_energy', 'peak_power', 'time', 'params', 'multiplies']
# Per-model means over the 10 keywords (end-to-end measurements).
df_grouped = df.groupby('model')
# FIX: select columns with a list — passing a bare tuple of labels to a
# GroupBy (df_grouped['a', 'b', ...]) is deprecated and removed in pandas 2.x.
df_grouped_means = df_grouped[metric_cols].mean()
df_grouped_means.round(2)
# Per-model means for the preprocessing-only runs.
df_pre_grouped = df_pre.groupby('model')
df_pre_grouped_means = df_pre_grouped[metric_cols].mean()
df_pre_grouped_means
df_pre_grouped_means['time'].mean()
df_pre_grouped_means['total_energy'].mean()
df_pre_grouped_means['peak_power'].mean()
# Inference-only cost = end-to-end minus preprocessing.  Peak power, params
# and multiplies are not additive, so keep the end-to-end values for those.
df_inf_only = df_grouped_means - df_pre_grouped_means
df_inf_only['peak_power'] = df_grouped_means['peak_power']
df_inf_only['params'] = df_grouped_means['params']
df_inf_only['multiplies'] = df_grouped_means['multiplies']
df_inf_only.round(2)
# Accuracy vs. total energy, one point series per model.
# NOTE(review): sns.factorplot was renamed catplot in seaborn 0.9 and later
# removed; left as-is here to preserve the ax= behaviour.
dims = (14, 6)
fig, ax = plt.subplots(figsize=dims)
g = sns.factorplot(x="accuracy", y="total_energy", hue="model", data=df, ax=ax)
g.set(xlim=(0, None), ylim=(0, None))
# Thin out the x tick labels: show only every 10th.
for ind, label in enumerate(ax.get_xticklabels()):
    if ind % 10 == 0: # every 10th label is kept
        label.set_visible(True)
    else:
        label.set_visible(False)
```
# Visualizations
## Energy vs. Multiplies
```
df_inf_aggregated = df_inf_only.reset_index()
# Linear fits: energy as a function of parameter count and multiply count.
ax = sns.regplot(x=df['params'], y=df['total_energy'])
ax = sns.regplot(x=df['multiplies'], y=df['total_energy'])
# Persist both the raw and aggregated observations.
df.to_csv('observations.csv', index=False)
df_inf_aggregated.to_csv('observations_agg.csv', index=False)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/daemon-Lee/simplex_method_for_linear_program/blob/master/project/simplex_method/Simplex_method.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Copyright 2020 Duy L.Dinh. { display-mode: "form" }
#@markdown CS1302 HE130655.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Project MAO302
This is the final project of the MAO302 course at FPT University.
## Part 1: simplex method for linear program (LP)
```
import numpy as np
# Fix the RNG seed so generated LP instances are reproducible.
np.random.seed(2020)
```
### Generate input matrices of a standard linear program in matrix form
```
def gen_problem(n_var, n_contrain):
    """Generate (A, b, c) for a random standard-form LP.

    A has shape (n_contrain, n_var + n_contrain): random coefficients for the
    decision variables followed by identity columns for the slack variables.
    b has shape (n_contrain,) and c has shape (n_var + n_contrain,) with zeros
    in the slack positions. Coefficients are drawn uniformly from [-7, 18].
    """
    # A will contain the coefficients of the constraints:
    # random columns for the decision variables, identity for the slacks.
    decision_cols = np.random.randint(low=-7, high=19, size=(n_var, n_contrain))
    slack_cols = np.eye(n_contrain)
    A = np.vstack((decision_cols, slack_cols)).T
    # b will contain the amount of resources (may be negative, forcing Phase I).
    b = np.random.randint(low=-7, high=19, size=(n_contrain,))
    # c will contain coefficients of objective function Z; slacks cost nothing.
    objective_coeffs = np.random.randint(low=-7, high=19, size=(n_var,))
    c = np.concatenate([objective_coeffs, np.zeros((n_contrain,))])
    return A, b, c
```
### Write code to solve the generated LP using the two-phase simplex method in matrix form
```
#@title THE SIMPLEX METHOD IN MATRIX NOTATION
class Simplex_method:
    """Two-phase simplex solver in matrix notation.

    Expects A to already contain the slack-variable identity columns on the
    right, b as the resource vector, and c as the full objective vector
    (zeros for slacks). solve() dispatches to the primal, dual, or two-phase
    procedure depending on the feasibility of the initial dictionary.
    """
    #@markdown First input A, b, c, where:
    #@markdown - **A** will contain the coefficients of the constraints
    #@markdown - **b** will contain the amount of resources
    #@markdown - **c** will contain coefficients of objective function Z
    def __init__(self, A, b, c):
        self.A = A
        self.c = c
        self.B = 0
        self.n = 0
        #@markdown Generate *B* and *N*
        #@markdown - **B** will contain the Basic set
        #@markdown - **n** will contain the nonbasic set
        n_contrain = len(self.A)
        n_var = len(self.c) - n_contrain
        # Index column-vectors: basics start as the slack columns, nonbasics
        # as the decision-variable columns.
        self.B = np.arange(n_var, n_var + n_contrain)[np.newaxis].T
        self.n = np.arange(0, n_var)[np.newaxis].T
        #@markdown - The initial values of the basic variables: xb = b
        self.xb = np.transpose([b])
        #@markdown - The initial values of the nonbasic dual variables: zn = -cn
        self.zn = -self.c[self.n]
        self.status = 'Optimal'
        self.objective = 0

    def solve(self, verbor=False):
        """Solve the LP; returns a dict with 'status', 'iter', the objective
        value and 'sol' (the full solution vector)."""
        self.count = 0
        # Unboundedness pre-check: a nonbasic column with no positive entry
        # but a positive objective coefficient can be increased without limit.
        for i in self.n:
            if True not in (self.A[:, i] > 0) and self.c[i] > 0:
                print("Unbounded")
                self.status = 'Unbounded'
                sol = np.zeros(len(self.c))
                sol[self.B] = self.xb
                return {
                    'status': self.status,
                    "iter": self.count,
                    "objective": self.objective,
                    "sol": sol
                }
        #@markdown Find solution for problem
        #@markdown - Check for Optimality. If xb ≥ 0 and zn ≥ 0, stop. The current
        #@markdown solution is optimal.
        if False not in (self.xb >= 0) and False not in (self.zn <= 0):
            print("Optimal — the problem was trivial")
            sol = np.zeros(len(self.c))
            sol[self.B] = self.xb
            return {
                'status': self.status,
                "iter": self.count,
                "objective": self.objective,
                "sol": sol
            }
        #@markdown - Since xb ≥ 0, the initial solution is **Primal feasible**, and hence
        #@markdown we can apply the simplex method without needing any Phase I procedure.
        elif False not in (self.xb >= 0) and False in (self.zn <= 0):
            print("primal feasible")
            print("run primal simplex method")
            result = self.primal_simplex(verbor=verbor)
        #@markdown - Since xb ≥ 0, the initial solution is **Dual feasible**
        elif False in (self.xb >= 0) and False not in (self.zn <= 0):
            print("run dual simplex method")
            result = self.solve_two_phase(verbor=verbor)
        #@markdown - Where both xb and cn have components of the wrong sign.
        #@markdown In this case, we must employ a **two-phase procedure**.
        else:
            print("dual feasible")
            print("Start convert negative components")
            # self.zn = np.maximum(self.zn, -self.zn)
            # self.zn = np.maximum(self.zn, 0)
            print("run two phase simplex method")
            result = self.solve_two_phase(verbor=verbor)
        return result

    def solve_two_phase(self, verbor=False):
        """Phase I: dual simplex to reach primal feasibility; Phase II:
        primal simplex from that dictionary to optimality."""
        #@markdown - In Phase I apply the dual simplex method to find an optimal solution
        #@markdown of this modified problem Phase I is most likely not optimal, but it
        #@markdown is feasible, and therefore the primal simplex method can be used to
        #@markdown find the optimal solution to the original problem.
        print("Phase one")
        result = self.dual_simplex(verbor=verbor)
        if result['status'] == 'Infeasible':
            return result
        print("Phase two")
        result = self.primal_simplex(verbor=verbor)
        return result

    def primal_simplex(self, verbor=False):
        """Primal simplex iterations: pick the most negative zn as entering
        column, ratio-test on delta_xb/xb for the leaving row, then pivot."""
        objective = -np.inf  # best objective so far, used as a cycling guard
        count = 0
        # Bi = basis matrix B, N = nonbasic columns of A (recomputed per pivot).
        Bi = self.A[:, self.B].reshape((-1, len(self.B)))
        N = self.A[:, self.n].reshape((-1, len(self.n)))
        if verbor:
            A_hat = np.concatenate([self.B.T, self.xb.T, N.T, Bi.T]).T
            print("Objective\n", np.concatenate([self.zn, self.xb]).T)
            print("Dictionary\n", A_hat)
        # Iterate while some reduced cost is still negative (not optimal).
        while(np.min(self.zn) < 0):
            # Entering variable: most negative dual value.
            j = np.argmin(self.zn)
            ej = np.zeros((1, len(self.zn))).T
            ej[j] = 1
            delta_xb = np.linalg.inv(Bi).dot(N).dot(ej)
            # Primal step length (inverse of the max ratio).
            t = np.max(delta_xb/self.xb)**-1
            if t < 0 or t == np.inf:
                # No bounding ratio: the objective is unbounded.
                self.status = 'Unbounded'
                sol = np.zeros(len(self.c))
                sol[self.B] = self.xb
                return {
                    'status': self.status,
                    "iter": self.count,
                    "objective": self.objective,
                    "sol": sol
                }
            # Leaving variable: row achieving the max ratio.
            i = np.argmax(delta_xb/self.xb)
            ei = np.zeros((1, len(self.xb))).T
            ei[i] = 1
            delta_zn = -(np.linalg.inv(Bi).dot(N)).T.dot(ei)
            s = self.zn[j]/delta_zn[j]
            # Update primal and dual values, then install the step lengths.
            self.xb = self.xb - t*delta_xb
            self.zn = self.zn - s*delta_zn
            self.xb[i] = t
            self.zn[j] = s
            # pivot swap
            pivot = self.B[i].copy()
            self.B[i] = self.n[j].copy()
            self.n[j] = pivot
            Bi = self.A[:, self.B].reshape((-1, len(self.B)))
            N = self.A[:, self.n].reshape((-1, len(self.n)))
            count += 1
            self.count += 1
            self.objective = self.xb.T.dot(self.c[self.B]).reshape(-1)[0]
            if verbor:
                A_hat = np.concatenate([self.B.T, self.xb.T, N.T, Bi.T]).T
                print("iter:", count)
                print("Dictionary\n", A_hat)
                print("objective:", self.objective)
            # Anti-cycling guard: the primal objective must strictly improve.
            if self.objective > objective:
                objective = self.objective
            else:
                self.status = 'Infeasible'
                sol = np.zeros(len(self.c))
                sol[self.B] = self.xb
                return {
                    'status': self.status,
                    "iter": self.count,
                    "objective": self.objective,
                    "sol": sol
                }
        sol = np.zeros(len(self.c))
        sol[self.B] = self.xb
        # NOTE(review): this success path uses the key "optimal" while every
        # other return uses "objective" — callers reading result["objective"]
        # will KeyError here; confirm which key is intended.
        return {
            'status': self.status,
            "iter": self.count,
            "optimal": self.objective,
            "sol": sol
        }

    def dual_simplex(self, verbor=False):
        """Dual simplex iterations: pick the most negative xb as leaving row,
        ratio-test on delta_zn/zn for the entering column, then pivot."""
        objective = np.inf  # best objective so far, used as a cycling guard
        count = 0
        Bi = self.A[:, self.B].reshape((-1, len(self.B)))
        N = self.A[:, self.n].reshape((-1, len(self.n)))
        if verbor:
            A_hat = np.concatenate([self.B.T, self.xb.T, N.T, Bi.T]).T
            print("Objective\n", np.concatenate([self.zn, self.xb]).T)
            print("Dictionary\n", A_hat)
        # Iterate while some basic variable is still negative (primal infeasible).
        while(np.min(self.xb) < 0):
            # Leaving variable: most negative basic value.
            i = np.argmin(self.xb)
            ei = np.zeros((1, len(self.xb))).T
            ei[i] = 1
            delta_zn = -(np.linalg.inv(Bi).dot(N)).T.dot(ei)
            # Dual step length (inverse of the max ratio).
            s = np.max(delta_zn/self.zn)**-1
            # Entering variable: column achieving the max ratio.
            j = np.argmax(delta_zn/self.zn)
            ej = np.zeros((1, len(self.zn))).T
            ej[j] = 1
            delta_xb = np.linalg.inv(Bi).dot(N).dot(ej)
            t = self.xb[i]/delta_xb[i]
            # Update primal and dual values, then install the step lengths.
            self.xb = self.xb - t*delta_xb
            self.zn = self.zn - s*delta_zn
            self.xb[i] = t
            self.zn[j] = s
            # pivot
            pivot = self.B[i].copy()
            self.B[i] = self.n[j].copy()
            self.n[j] = pivot
            Bi = self.A[:, self.B].reshape((-1, len(self.B)))
            N = self.A[:, self.n].reshape((-1, len(self.n)))
            A_hat = np.concatenate([self.B.T, self.xb.T, N.T, Bi.T]).T
            count += 1
            self.count += 1
            self.objective = self.xb.T.dot(self.c[self.B]).reshape(-1)[0]
            if verbor:
                A_hat = np.concatenate([self.B.T, self.xb.T, N.T, Bi.T]).T
                print("iter:", count)
                print("Dictionary\n", A_hat)
                print("objective:", self.objective)
            # Anti-cycling guard: the dual objective must strictly decrease.
            if self.objective < objective:
                objective = self.objective
            else:
                self.status = 'Infeasible'
                sol = np.zeros(len(self.c))
                sol[self.B] = self.xb
                return {
                    'status': self.status,
                    "iter": self.count,
                    "objective": self.objective,
                    "sol": sol
                }
        sol = np.zeros(len(self.c))
        sol[self.B] = self.xb
        return {
            'status': self.status,
            "iter": self.count,
            "objective": self.objective,
            "sol": sol
        }
print("Exercise 2.3")
# A will contain the coefficients of the constraints
A = np.array([[-1, -1, -1, 1, 0],
              [2, -1, 1, 0, 1]])
# b will contain the amount of resources
b = np.array([-2, 1])
# c will contain coefficients of objective function Z
c = np.array([2, -6, 0, 0, 0])
# b has a negative entry and c a positive one, so solve() takes the
# two-phase path (dual simplex Phase I, then primal simplex Phase II).
simplex = Simplex_method(A, b, c)
print(simplex.solve(verbor=True))
```
### Solve the generated LP with the pulp and cplex tools
#### pulp lib
Install pulp
```
!pip install pulp
print("Exercise 2.3")
# A will contain the coefficients of the constraints
A = np.array([[-1, -1, -1, 1, 0],
[2, -1, 1, 0, 1]])
# b will contain the amount of resources
b = np.array([-2, 1])
# c will contain coefficients of objective function Z
c = np.array([2, -6, 0, 0, 0])
import pulp as p
# Generate B and N
n_contrain = len(A)
n_var = len(c) - n_contrain
B = np.arange(n_var, n_var + n_contrain)[np.newaxis].T
n = np.arange(0, n_var)[np.newaxis].T
# Create a LP Minimization problem
Lp_prob = p.LpProblem('Problem', p.LpMaximize)
# Create problem Variables
x = [p.LpVariable("x"+str(i), lowBound = 0) for i in range(1,n_var+1)]
# Objective Function
objective = 0
for i in range(n_var):
objective += c[i]*x[i]
Lp_prob += objective
# Constraints:
for i in range(n_contrain):
contrain = 0
for j in range(n_var):
contrain += A[i,j]*x[j] <= b[i]/n_var
Lp_prob += contrain
# Display the problem
print(Lp_prob)
status = Lp_prob.solve() # Solver
print(p.LpStatus[status]) # The solution status
# Printing the final solution
print(p.value(x[0]), p.value(x[1]), p.value(x[2]), p.value(Lp_prob.objective))
import pulp as p
def pulp_lib(A, b, c, verbor=False):
    """Solve max c^T x s.t. A x <= b, x >= 0 with PuLP.

    Parameters
    ----------
    A : ndarray, shape (n_contrain, n_var + n_contrain)
        Constraint coefficients; slack columns assumed on the right.
    b : ndarray, shape (n_contrain,)
        Amount of resources (right-hand side).
    c : ndarray, shape (n_var + n_contrain,)
        Objective coefficients; zeros for the slack variables.
    verbor : bool, optional
        When True, print the solver status and optimal objective value.

    Returns
    -------
    dict with 'status' (PuLP status string) and 'objective' (optimal value).
    """
    # Only the decision variables are modeled; slacks are implied by "<=".
    n_contrain = len(A)
    n_var = len(c) - n_contrain
    # Create a LP Maximization problem
    Lp_prob = p.LpProblem('Problem', p.LpMaximize)
    # Create problem Variables x1..x_{n_var}, all non-negative
    x = [p.LpVariable("x"+str(i), lowBound = 0) for i in range(1,n_var+1)]
    # Objective Function: sum_i c[i] * x[i]
    Lp_prob += p.lpSum(c[i]*x[i] for i in range(n_var))
    # Constraints: one row of A x <= b per constraint.
    # BUG FIX: the original accumulated LpConstraint objects
    # (`contrain += A[i,j]*x[j] <= b[i]/n_var`), relying on constraint
    # addition and splitting b[i] over n_var terms; build the affine
    # expression first and compare against b[i] once instead.
    for i in range(n_contrain):
        Lp_prob += p.lpSum(A[i, j]*x[j] for j in range(n_var)) <= b[i]
    status = Lp_prob.solve()  # Solver
    if verbor:
        print(p.LpStatus[status])  # The solution status
        # Printing the final solution
        print(p.value(Lp_prob.objective))
    return {
        'status': p.LpStatus[status],
        'objective': p.value(Lp_prob.objective)
    }
```
#### cplex
```
!pip install cplex
import cplex
def cplex_lib(A, b, c):
    """Solve max c^T x s.t. A x <= b, x >= 0 with the CPLEX Python API.

    A is expected to contain slack columns on the right; only the decision
    variable columns are passed to CPLEX ("<=" senses supply the slack).
    Returns a dict with 'objective', 'status' and 'sol' (variable values).
    """
    # Input all the data and parameters here
    num_constraints = len(A)
    num_decision_var = len(c) - num_constraints
    # Indices of the decision-variable columns (drop the slack columns).
    n = np.arange(0, num_decision_var)[np.newaxis].T
    A = A[:,n.T].reshape(num_constraints, num_decision_var).tolist()
    b = b.tolist()
    c = c[n].T.reshape(len(n)).tolist()
    # constraint_type = ["L", "L", "L"] # Less, Greater, Equal
    constraint_type = ["L"]*num_constraints
    # ============================================================
    # Establish the Linear Programming Model
    myProblem = cplex.Cplex()
    # Add the decision variables and set their lower bound and upper bound (if necessary)
    myProblem.variables.add(names= ["x"+str(i) for i in range(num_decision_var)])
    for i in range(num_decision_var):
        myProblem.variables.set_lower_bounds(i, 0.0)
    # Add constraints: one dense row per constraint, sense "L" (<=).
    for i in range(num_constraints):
        myProblem.linear_constraints.add(
            lin_expr= [cplex.SparsePair(ind= [j for j in range(num_decision_var)], val= A[i])],
            rhs= [b[i]],
            names = ["c"+str(i)],
            senses = [constraint_type[i]]
        )
    # Add objective function and set its sense
    for i in range(num_decision_var):
        myProblem.objective.set_linear([(i, c[i])])
    myProblem.objective.set_sense(myProblem.objective.sense.maximize)
    # Solve the model and print the answer
    myProblem.solve()
    return{
        'objective': myProblem.solution.get_objective_value(),
        'status': myProblem.solution.get_status_string(),
        'sol': myProblem.solution.get_values()
    }
print("Exercise 2.3")
# A will contain the coefficients of the constraints
A = np.array([[-1, -1, -1, 1, 0],
              [2, -1, 1, 0, 1]])
# b will contain the amount of resources
b = np.array([-2, 1])
# c will contain coefficients of objective function Z
c = np.array([2, -6, 0, 0, 0])
# Solve the same Exercise 2.3 problem with CPLEX for comparison.
cplex_lib(A, b, c)
```
### Repeat (1)-(3) one hundred times and compare the mean and standard deviation of running time of your code with those of the chosen tool.
```
n_sample = 100
np.random.seed(2020)
A_list = []
b_list = []
c_list = []
# Generate 100 random LPs, each with 2-6 variables and 2-6 constraints
# (np.random.randint's high bound is exclusive).
for i in range(n_sample):
    n_var = np.random.randint(low=2, high=7)
    n_contrain = np.random.randint(low=2, high=7)
    A, b, c = gen_problem(n_var, n_contrain)
    A_list.append(A)
    b_list.append(b)
    c_list.append(c)
from time import time

# Time pulp_lib on each of the generated problems.
# BUG FIX: the original timed the *same* last-generated (A, b, c) on every
# iteration; index into A_list / b_list / c_list instead so all 100
# problems are actually solved.
running_time_pulp = []
output_pulp = []
for i in range(n_sample):
    start = time()
    output_pulp.append(pulp_lib(A_list[i], b_list[i], c_list[i], verbor=False))
    end = time() - start
    running_time_pulp.append(end)

# Time cplex_lib on the same problems.
# BUG FIX: the original called pulp_lib here, so the "cplex" timings were
# just a second measurement of pulp.
running_time_cplex = []
output_cplex = []
for i in range(n_sample):
    start = time()
    output_cplex.append(cplex_lib(A_list[i], b_list[i], c_list[i]))
    end = time() - start
    running_time_cplex.append(end)

# Time the hand-written Simplex_method on the same problems.
running_time_simplex_method = []
output_simplex_method = []
for i in range(n_sample):
    start = time()
    simplex = Simplex_method(A_list[i], b_list[i], c_list[i])
    output_simplex_method.append(simplex.solve(verbor=False))
    end = time() - start
    running_time_simplex_method.append(end)
#@title Compare pulp and Simplex method
# Mean and standard deviation of per-problem wall-clock times (seconds).
# Simplex method
mean_Simplex_method = np.mean(running_time_simplex_method)
std_Simplex_method = np.std(running_time_simplex_method)
# pulp
mean_pulp = np.mean(running_time_pulp)
std_pulp = np.std(running_time_pulp)
# cplex
mean_cplex = np.mean(running_time_cplex)
std_cplex = np.std(running_time_cplex)
# Positive difference => pulp is slower than the hand-written solver.
print("mean running time of pulp - simplex_method (s):", mean_pulp - mean_Simplex_method)
print("standard deviation running time of pulp - simplex_method (s):", std_pulp - std_Simplex_method)
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
# One-row heatmaps: columns are [own code, pulp, cplex].
# NOTE(review): both heatmaps render into the same figure/axes without a
# new plt.figure() between them — confirm the overlay is intended.
mean = np.array([mean_Simplex_method, mean_pulp, mean_cplex])[np.newaxis]
ax = sns.heatmap(mean, annot=True)
plt.title("Compare mean")
ax.set_xticklabels(['code','pulp','cplex'])
std = np.array([std_Simplex_method, std_pulp, std_cplex])[np.newaxis]
ax = sns.heatmap(std, annot=True)
plt.title("Compare standard deviation")
ax.set_xticklabels(['code','pulp','cplex'])
```
| github_jupyter |
# How to automate IP ranges calculations in Azure using PowerShell
> The notebook does work on Linux and [](https://mybinder.org/v2/gh/eosfor/scripting-notes/HEAD)
## Scenario
Assume we got the IP range of `10.172.0.0/16` from the network team for planned Azure Landing Zones. We want to automate this by making a tool, which will automatically calculate IP ranges for us given some high-level and simple-to-understand details about the future networks.
This notebook shows how to do it using the [ipmgmt](https://github.com/eosfor/ipmgmt) module. So let us go and install it first
```
Install-Module ipmgmt -Scope CurrentUser
```
And import it
```
Import-Module ipmgmt
```
The module contains only two cmdlets. The `Get-VLSMBreakdown` breaks down a range into smaller ones, meaning we can use it to break a range into VNETs and then each VNET into subnets. The `Get-IPRanges` cmdlet given the list of ranges in-use and the "root" range tries to find a free slot of the specified size. It can be used to avoid losing IP space
```
Get-Command -Module ipmgmt
```
Let us look at how we can break down our big "root" IP range into smaller ones. For that, we just need to prepare a list of smaller sub-ranges in the form of PowerShell hashtables, like so `@{type = "VNET-HUB"; size = (256-2)}`. Here we say that the name of the range is `VNET-HUB` and the size is simply `256-2`, which is the maximum number of IPs in the `/24` subnet minus 2, the first and the last.
If we need more than one, we just make an array of these hashtables
```
# One hashtable per desired sub-range: a label plus a size in usable IPs
# (2^n minus the network and broadcast addresses).
$subnets = @{type = "VNET-HUB"; size = (256-2)},
           @{type = "VNET-A"; size = (256-2)}
```
Now everything is ready and we can try to break the "root" network
```
Get-VLSMBreakdown -Network 10.172.0.0/16 -SubnetSize $subnets | ft type, network, netmask, *usable, cidr -AutoSize
```
Here we got two ranges named `VNET-A` and `VNET-HUB`. However, by doing so we made a few unused slots in the `root` range. They are marked as `reserved`, just for our convenience. It shows what happens to the range when we break it down. The smaller sub-ranges you make, the more of such unused ranges you get in the end.
Ok, let's try to use what we've got. For that, we need to authenticate to Azure. When running locally you can just do
```
Login-AzAccount
```
In Binder however, it needs to be a bit different, like this
```
Connect-AzAccount -UseDeviceAuthentication
```
Once authenticated, we can create networks, for example, like this. Here we first filter out the `reserved` ones for simplicity.
```
# Break the root range down, drop the 'reserved' filler slots, then create
# one Azure VNET per remaining sub-range.
$vnets = Get-VLSMBreakdown -Network 10.172.0.0/16 -SubnetSize $subnets | ? type -ne 'reserved'
$vnets | % {
    New-AzVirtualNetwork -Name $_.type -ResourceGroupName 'vnet-test' `
                         -Location 'eastus2' -AddressPrefix "$($_.Network)/$($_.cidr)" | select name, AddressSpace, ResourceGroupName, Location
}
```
Ok, assume that at some point we need to add a few more networks. At the same time we may want to reuse one of those `reserved` slots, if it matches the size. This is what `Get-IPRanges` does. It takes a list of IP ranges "in-use" and returns slots that can fit the range in question. For example, in our case, we have a "base range" of `10.10.0.0/16` and two ranges in-use: `10.10.5.0/24` and `10.10.7.0/24`. We are looking for a range of the size `/22`. So the cmdlet recommends we use `10.172.4.0/22`, which is one of the `reserved` ranges from the previous example
```
Get-IPRanges -Networks "10.172.1.0/24", "10.172.0.0/24" -CIDR 22 -BaseNet "10.172.0.0/16" | ft -AutoSize
```
What if we need to find more than just one range at a time? Need not worry. We can do it with this simple script. And we are using Azure as a source of truth, as we can always query it for the real IP ranges, which are in use.
So what we need to do is relatively simple:
- make a list of sizes we want to create and put it into a variable - `$cidrRange`
- pull the ranges from Azure. We assume they are in use by someone - `$existingRanges`
- cast whatever we pulled from azure the `System.Net.IPNetwork` for correctness. This type is used inside the `ipmgmt` module to store information about networks and do all the calculations, comparisons, etc.
- now we run through the list of sizes, for each of them ask `Get-IPRanges` to find a proper slot, and accumulate the results
Now we just need to mark the new ranges as `free`, to see what we've got. For that, we compare what we have in Azure to what we just calculated and mark the difference accordingly
```
# CIDR prefix lengths of the ranges still needed, sorted ascending.
$cidrRange = 25,25,24,24,24,24,23,25,26,26 | sort
# Ranges currently in use, pulled from Azure as the source of truth.
$existingRanges = (Get-AzVirtualNetwork -ResourceGroupName vnet-test |
    select name, @{l = "AddressSpace"; e = { $_.AddressSpace.AddressPrefixes }}, ResourceGroupName, Location |
    select -expand AddressSpace)
# Cast to System.Net.IPNetwork, the type ipmgmt uses for comparisons.
$existingNetworks = $existingRanges | % {[System.Net.IPNetwork]$_}
$nets = $existingRanges
$ret = @()
# For each requested size, ask Get-IPRanges for a slot, then feed the
# accumulated result back in so later requests see earlier allocations.
$cidrRange | % {
    $ret = Get-IPRanges -Networks $nets -CIDR $_ -BaseNet "10.172.0.0/16"
    $nets = ($ret | select @{l="range"; e = {"$($_.network)/$($_.cidr)"}}).range
}
# Mark calculated ranges that are not already in Azure as free.
$ret | % {
    if ( -not ($_ -in $existingNetworks)) {$_.IsFree = $true}
}
$ret | ft -AutoSize
```
And this gives us all the necessary information to add a few more networks.
| github_jupyter |
TSG001 - Run azdata copy-logs
=============================
Steps
-----
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
# Per-binary hint tables, populated later in this cell; keyed by the
# executable name (e.g. 'kubectl', 'azdata').
retry_hints = {}    # substrings of stderr that indicate a transient fault worth retrying
error_hints = {}    # (substring, title, notebook path) triples for known errors
install_hint = {}   # (title, notebook path) pairs pointing at install guides
first_run = True    # lazily load expert rules on the first run() call
rules = None        # expert rules from this notebook's metadata, or None
def run(cmd, return_output=False, no_output=False, retry_count=0):
    """
    Run shell command, stream stdout, print stderr and optionally return output

    Parameters:
        cmd: command line to execute (kubectl, azdata, curl, python, ...)
        return_output: when True, return captured stdout instead of printing it
        no_output: when True, do not capture stdout/stderr at all (needed for
            tools that emit scrolling progress bars, which hang Jupyter)
        retry_count: internal counter for the transient-fault retry recursion
    Raises FileNotFoundError if the executable cannot be located, and
    SystemExit if the command returns a non-zero exit code.
    """
    MAX_RETRIES = 5
    output = ""
    retry = False

    global first_run
    global rules

    # Lazily load the expert rules from this notebook's metadata once.
    if first_run:
        first_run = False
        rules = load_rules()

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # To aid supportabilty, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
    # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)

    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #

    # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)
        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            line_decoded = line.decode()

            # azdata emits a single empty line to stderr when doing an hdfs cp, don't
            # print this empty "ERR:" as it confuses.
            #
            if line_decoded == "":
                continue

            print(f"STDERR: {line_decoded}", end='')

            if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                exit_code_workaround = 1

            # Surface context-aware hints for known error messages.
            if user_provided_exe_name in error_hints:
                for error_hint in error_hints[user_provided_exe_name]:
                    if line_decoded.find(error_hint[0]) != -1:
                        display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))

            if rules is not None:
                apply_expert_rules(line_decoded)

            # Transient-fault handling: re-run the whole command up to MAX_RETRIES times.
            if user_provided_exe_name in retry_hints:
                for retry_hint in retry_hints[user_provided_exe_name]:
                    if line_decoded.find(retry_hint) != -1:
                        if retry_count < MAX_RETRIES:
                            print(f"RETRY: {retry_count} (due to: {retry_hint})")
                            retry_count = retry_count + 1
                            output = run(cmd, return_output=return_output, retry_count=retry_count)

                            if return_output:
                                return output
                            else:
                                return

    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
        else:
            if exit_code_workaround !=0 :
                raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        return output
def load_json(filename):
    """Read the UTF-8 encoded JSON file at *filename* and return the parsed object."""
    with open(filename, encoding="utf8") as handle:
        parsed = json.load(handle)
    return parsed
def load_rules():
    """Load the expert rules stored in this notebook's own metadata.

    Returns the sorted rule list, or None if the notebook file cannot be
    read (e.g. it was renamed) or carries no expert rules.
    """
    try:
        # Load this notebook as json to get access to the expert rules in the notebook metadata.
        #
        j = load_json("tsg001-copy-logs.ipynb")
    except:
        pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["rules"]
            rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
            # print (f"EXPERT: There are {len(rules)} rules to evaluate.")
            return rules
def apply_expert_rules(line):
    """Match *line* (one line of command output) against the loaded expert
    rules and display a HINT link for every rule whose regex matches."""
    global rules

    for rule in rules:
        # rules that have 9 elements are the injected (output) rules (the ones we want). Rules
        # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
        # not ../repair/tsg029-nb-name.ipynb)
        if len(rule) == 9:
            notebook = rule[1]
            cell_type = rule[2]
            output_type = rule[3] # i.e. stream or error
            output_type_name = rule[4] # i.e. ename or name
            output_type_value = rule[5] # i.e. SystemExit or stdout
            details_name = rule[6]  # i.e. evalue or text
            expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!

            # print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")

            if re.match(expression, line, re.DOTALL):

                # print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))

                match_found = True

                display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')

# Hints for binary (transient fault) retry, (known) error and install guide
#
# retry_hints: stderr substrings that indicate a transient fault — run() re-runs the command.
# error_hints: (substring, title, notebook) — run() displays the linked troubleshooting notebook.
# install_hint: (title, notebook) per binary — shown when the executable is not found on PATH.
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['azdata login', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', 
'../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP055 - Install azdata command line interface', '../install/sop055-install-azdata.ipynb']}
```
### Get the Kubernetes namespace for the big data cluster
Get the namespace of the big data cluster using the kubectl command line
interface.
NOTE: If there is more than one big data cluster in the target
Kubernetes cluster, then set \[0\] to the correct value for the big data
cluster.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
# (the first namespace carrying the MSSQL_CLUSTER label).
try:
    namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
    # Lookup failed — point the user at the relevant troubleshooting
    # notebooks, then re-raise so the notebook stops here.
    from IPython.display import Markdown
    print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
    display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
    display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
    display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
    raise
else:
    print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```
### Run copy-logs
```
import os
import tempfile
import shutil

# Collect logs under <tmp>/copy-logs/<namespace>; start from a clean folder
# so stale logs from a previous run are not mixed in.
target_folder = os.path.join(tempfile.gettempdir(), "copy-logs", namespace)

if os.path.isdir(target_folder):
    shutil.rmtree(target_folder)
```
### View the `--help` options
```
run(f'azdata bdc debug copy-logs --help')
```
### Run the `copy-logs`
NOTES:
1. The `--timeout` option does not work on Windows
2. Use `--skip-compress` on Windows if no utility is available to uncompress
   .tar.gz files.
```
# Copy cluster logs (dumps excluded, no compression) into target_folder.
run(f'azdata bdc debug copy-logs --namespace {namespace} --target-folder {target_folder} --exclude-dumps --skip-compress --verbose')

print(f'The logs are available at: {target_folder}')
print('Notebook execution complete.')
```
| github_jupyter |
# CrowdTruth for Multiple Choice Tasks: Relation Extraction
In this tutorial, we will apply CrowdTruth metrics to a **multiple choice** crowdsourcing task for **Relation Extraction** from sentences. The workers were asked to read a sentence with 2 highlighted terms, then pick from a multiple choice list what are the relations expressed between the 2 terms in the sentence. The task was executed on [FigureEight](https://www.figure-eight.com/). For more crowdsourcing annotation task examples, click [here](https://raw.githubusercontent.com/CrowdTruth-core/tutorial/getting_started.md).
To replicate this experiment, the code used to design and implement this crowdsourcing annotation template is available here: [template](https://raw.githubusercontent.com/CrowdTruth/CrowdTruth-core/master/tutorial/templates/Relex-Multiple-Choice/template.html), [css](https://raw.githubusercontent.com/CrowdTruth/CrowdTruth-core/master/tutorial/templates/Relex-Multiple-Choice/template.css), [javascript](https://raw.githubusercontent.com/CrowdTruth/CrowdTruth-core/master/tutorial/templates/Relex-Multiple-Choice/template.js).
This is a screenshot of the task as it appeared to workers:

A sample dataset for this task is available in [this file](https://raw.githubusercontent.com/CrowdTruth/CrowdTruth-core/master/tutorial/data/relex-multiple-choice.csv), containing raw output from the crowd on FigureEight. Download the file and place it in a folder named `data` that has the same root as this notebook. Now you can check your data:
```
import pandas as pd
# Raw crowd output from FigureEight; one row per worker judgment.
test_data = pd.read_csv("../data/relex-multiple-choice.csv")
test_data.head()
```
## Declaring a pre-processing configuration
The pre-processing configuration defines how to interpret the raw crowdsourcing input. To do this, we need to define a configuration class. First, we import the default CrowdTruth configuration class:
```
import crowdtruth
from crowdtruth.configuration import DefaultConfig
```
Our test class inherits the default configuration `DefaultConfig`, while also declaring some additional attributes that are specific to the Relation Extraction task:
* **`inputColumns`:** list of input columns from the .csv file with the input data
* **`outputColumns`:** list of output columns from the .csv file with the answers from the workers
* **`annotation_separator`:** string that separates between the crowd annotations in `outputColumns`
* **`open_ended_task`:** boolean variable defining whether the task is open-ended (i.e. the possible crowd annotations are not known beforehand, like in the case of free text input); in the task that we are processing, workers pick the answers from a pre-defined list, therefore the task is not open ended, and this variable is set to `False`
* **`annotation_vector`:** list of possible crowd answers, mandatory to declare when `open_ended_task` is `False`; for our task, this is the list of relations
* **`processJudgments`:** method that defines processing of the raw crowd data; for this task, we process the crowd answers to correspond to the values in `annotation_vector`
The complete configuration class is declared below:
```
class TestConfig(DefaultConfig):
    """CrowdTruth configuration for the multiple-choice relation-extraction task."""

    # Input columns from the raw FigureEight .csv.
    inputColumns = ["sent_id", "term1", "b1", "e1", "term2", "b2", "e2", "sentence"]
    # Column holding the workers' answers.
    outputColumns = ["relations"]
    annotation_separator = "\n"
    # Closed task: workers pick from the fixed relation list below.
    open_ended_task = False
    annotation_vector = [
        "title", "founded_org", "place_of_birth", "children", "cause_of_death",
        "top_member_employee_of_org", "employee_or_member_of", "spouse",
        "alternate_names", "subsidiaries", "place_of_death", "schools_attended",
        "place_of_headquarters", "charges", "origin", "places_of_residence",
        "none"]

    def processJudgments(self, judgments):
        """Lowercase the crowd answers so they match `annotation_vector`."""
        for column in self.outputColumns:
            judgments[column] = judgments[column].apply(
                lambda value: str(value).lower())
        return judgments
```
## Pre-processing the input data
After declaring the configuration of our input file, we are ready to pre-process the crowd data:
```
# Pre-process the raw crowd .csv into CrowdTruth's internal representation
# using the configuration declared above.
data, config = crowdtruth.load(
file = "../data/relex-multiple-choice.csv",
config = TestConfig()
)
# One row per worker judgment, normalized by TestConfig.processJudgments.
data['judgments'].head()
```
## Computing the CrowdTruth metrics
The pre-processed data can then be used to calculate the CrowdTruth metrics:
```
results = crowdtruth.run(data, config)
```
`results` is a dict object that contains the quality metrics for sentences, relations and crowd workers.
The **sentence metrics** are stored in `results["units"]`:
```
results["units"].head()
```
The `uqs` column in `results["units"]` contains the **sentence quality scores**, capturing the overall workers agreement over each sentence. Here we plot its histogram:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(results["units"]["uqs"])
plt.xlabel("Sentence Quality Score")
plt.ylabel("Sentences")
```
The `unit_annotation_score` column in `results["units"]` contains the **sentence-relation scores**, capturing the likelihood that a relation is expressed in a sentence. For each sentence, we store a dictionary mapping each relation to its sentence-relation score.
```
results["units"]["unit_annotation_score"].head()
```
The **worker metrics** are stored in `results["workers"]`:
```
results["workers"].head()
```
The `wqs` columns in `results["workers"]` contains the **worker quality scores**, capturing the overall agreement between one worker and all the other workers.
```
plt.hist(results["workers"]["wqs"])
plt.xlabel("Worker Quality Score")
plt.ylabel("Workers")
```
The **relation metrics** are stored in `results["annotations"]`. The `aqs` column contains the **relation quality scores**, capturing the overall worker agreement over one relation.
```
results["annotations"]
```
| github_jupyter |
```
import scipy.io as sio
def readQTMFile(qtmFile):
    """Read a QTM (Qualisys Track Manager) MATLAB export.

    The .mat file stores everything under a single root variable whose name
    is not known in advance. scipy.io.loadmat also injects metadata keys
    such as '__header__', so the root variable is the first key that does
    not contain a '__' marker.

    Parameters
    ----------
    qtmFile : str
        Path to the .mat file.

    Returns
    -------
    dict
        Keys: 'frame_rate', 'trajectories' (markers x coords x frames
        array from Trajectories.Unidentified.Data), 'frames',
        'number_markers'.

    Raises
    ------
    ValueError
        If no root variable can be found in the file.
    """
    content = sio.loadmat(qtmFile)
    # Python 3 fix: dict_keys is not subscriptable, so the original
    # `content.keys()[mat_var_index]` raised TypeError. Locate the root
    # key directly instead of tracking an index.
    root_key = next((key for key in content if '__' not in key), None)
    if root_key is None:
        raise ValueError("File format wrong. It does not have the initial variable")
    root_var = content[root_key][0, 0]
    trajectories = root_var['Trajectories'][0, 0]['Unidentified'][0, 0]['Data']
    return {
        'frame_rate': root_var['FrameRate'][0, 0],
        'trajectories': trajectories,
        'frames': root_var['Frames'][0, 0],
        'number_markers': trajectories.shape[0],
    }
data = readQTMFile("data/JULIANA2701w4.mat")

# Assemble the session record: identifiers, acquisition metadata and the
# raw trajectories read from the QTM export.
positional_data = {
    'patient_id': 0,
    'gait_sample_index': 0,
    'frame_rate': data['frame_rate'],
    'frames': data['frames'],
    'number_markers': data['number_markers'],
    'original_filename': "JULIANA2701w4.mat",
    # Must be configured manually per recording (gait-cycle window).
    'initial_frame': 120,
    'final_frame': 189,
}
positional_data['markers'] = ['' for _ in range(data['number_markers'])]
positional_data['trajectories'] = data['trajectories'].tolist()

# Marker labels: index in the trajectory array -> anatomical landmark.
for marker_index, label in [(10, 'Left Tibia'), (7, 'Left Knee'),
                            (29, 'Left Trochanter'), (14, 'Right Tibia'),
                            (18, 'Right Knee'), (31, 'Right Trochanter')]:
    positional_data['markers'][marker_index] = label
import numpy as np
def cut_trajectories(pos):
    """Trim the trajectory array to the configured gait-cycle window.

    Expects `pos['trajectories']` to be (markers, coords, frames). When the
    keys 'initial_frame', 'final_frame' and 'frames' are all present and
    satisfy 0 < initial < final < frames, the frame axis is sliced to
    [initial, final); otherwise the full array is returned unchanged.
    """
    result = np.array(pos['trajectories'])
    window_keys = ('initial_frame', 'final_frame', 'frames')
    if all(key in pos for key in window_keys):
        start, stop, total = (pos[key] for key in window_keys)
        if 0 < start < stop < total:
            result = result[:, :, start:stop]
    return result
#cinematic
def calc_angular_velocities(origins, components_a, components_b, time):
    """Angular velocity of the joint-angle series.

    Computes the joint angles via get_angles, then differentiates them with
    a first-order finite difference over the sampling interval `time`.
    """
    angle_series = get_angles(origins, components_a, components_b)
    return np.diff(angle_series, axis=0) / time
def calc_angular_accelerations(angular_velocities, time):
    """Angular acceleration: finite difference of angular velocity over `time`."""
    return np.diff(angular_velocities, axis=0) / time
def get_angles(origins, components_a, components_b):
    """Per-frame joint angle, in degrees.

    Each argument is an (n_frames, 3) array of marker positions. For each
    frame the angle between the segment vectors (components_a - origins)
    and (components_b - origins) is computed, and its supplement
    (pi - angle) is returned in degrees.
    """
    vec_a = components_a - origins
    vec_b = components_b - origins
    norms = np.linalg.norm(vec_a, axis=1) * np.linalg.norm(vec_b, axis=1)
    cos_theta = np.sum(vec_a * vec_b, axis=1) / norms
    return np.degrees(np.pi - np.arccos(cos_theta))
def get_vectorial_velocities(vector, time):
    """Finite-difference velocity of consecutive samples spaced by `time`."""
    return np.diff(vector, axis=0) / time
def get_3d_velocities(vector_x, vector_y, vector_z, time):
    """Per-axis finite-difference velocities, returned as an (x, y, z) tuple."""
    return tuple(get_vectorial_velocities(axis, time)
                 for axis in (vector_x, vector_y, vector_z))
# Restrict the data to the gait-cycle window configured in positional_data.
cut_t = cut_trajectories(positional_data)
# NOTE(review): left and right were swapped after inspecting the data,
# which showed the sides were reversed.
# Left knee angle: origin at the knee, segments toward trochanter and tibia.
l_origin = cut_t[7][0:3][:] # knee
l_component_a = cut_t[29][0:3][:] # trochanter
l_component_b = cut_t[10][0:3][:] # tibia
# Right knee angle.
origin = cut_t[18][0:3][:] # knee
component_a = cut_t[31][0:3][:]# trochanter
component_b = cut_t[14][0:3][:] # tibia
# Right-leg features: angles, angular velocities, angular accelerations.
a = get_angles(
np.array(origin).T,
np.array(component_a).T,
np.array(component_b).T)
av = calc_angular_velocities(np.array(origin).T,
np.array(component_a).T,
np.array(component_b).T,
1/float(positional_data['frame_rate']))
aa = calc_angular_accelerations(av, 1/float(positional_data['frame_rate']))
# The linear velocities are taken from the trochanter marker.
velocities3d = np.vstack(get_3d_velocities(component_a.T[:,0],
component_a.T[:,1],
component_a.T[:,2],
1/float(positional_data['frame_rate']))).T
# Left-leg features (same pipeline as the right leg).
l_a = get_angles(
np.array(l_origin).T,
np.array(l_component_a).T,
np.array(l_component_b).T)
l_av = calc_angular_velocities(np.array(l_origin).T,
np.array(l_component_a).T,
np.array(l_component_b).T,
1/float(positional_data['frame_rate']))
l_aa = calc_angular_accelerations(l_av, 1/float(positional_data['frame_rate']))
# The linear velocities are taken from the trochanter marker.
l_velocities3d = np.vstack(get_3d_velocities(l_component_a.T[:,0],
l_component_a.T[:,1],
l_component_a.T[:,2],
1/float(positional_data['frame_rate']))).T
# Save the computed features to the preprocessed_data folder.
from Data_Savior import save_it_now
save_it_now(a, av, aa, velocities3d, l_a, l_av, l_aa, l_velocities3d, "./preprocessed_data/JULIANA2701w4.data")
%matplotlib inline
a_img = a
a_dom = 100 * np.arange(0, len(a_img))/np.float(len(a_img))
lr_i = 0
lr_f = a_dom.max() * 0.12
mst_i = lr_f
mst_f = a_dom.max() * 0.31
tst_i = mst_f
tst_f = a_dom.max() * 0.50
psw_i = tst_f
psw_f = a_dom.max() * 0.62
isw_i = psw_f
isw_f = a_dom.max() * 0.75
msw_i = isw_f
msw_f = a_dom.max() * 0.87
tsw_i = msw_f
tsw_f = a_dom.max() * 1
import matplotlib.pyplot as plt
fig = plt.figure(1)
plt.subplot(1,1,1)
plt.title("Angles for %s" % "Right Knee")
plt.ylabel ("Degrees")
plt.xlabel ("Percentual Gait Cycle")
plt.axis([0, a_dom.max(), a_img.min(), a_img.max()])
curve_a, = plt.plot(a_dom, a_img, 'r')
plt.axvspan(xmin = lr_i, xmax=lr_f, ymin =0, ymax=1, alpha = 0.2, color='b')
plt.annotate('LR', xy=(lr_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = mst_i, xmax=mst_f, ymin =0, ymax=1, alpha = 0.2, color='y')
plt.annotate('MSt', xy=(mst_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = tst_i, xmax=tst_f, ymin =0, ymax=1, alpha = 0.4, color='y')
plt.annotate('TSt', xy=(tst_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = psw_i, xmax=psw_f, ymin =0, ymax=1, alpha = 0.2, color='b')
plt.annotate('PSw', xy=(psw_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = isw_i, xmax=isw_f, ymin =0, ymax=1, alpha = 0.2, color='y')
plt.annotate('ISw', xy=(isw_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = msw_i, xmax=msw_f, ymin =0, ymax=1, alpha = 0.4, color='y')
plt.annotate('MSw', xy=(msw_i + 5, a_img.max() * 0.90))
plt.axvspan(xmin = tsw_i, xmax=tsw_f, ymin =0, ymax=1, alpha = 0.6, color='y')
plt.annotate('TSw', xy=(tsw_i + 5, a_img.max() * 0.90))
plt.show()
import matplotlib.pyplot as plt

# Gait-cycle phases as fractions of the full cycle (see the angles plot):
# (label, start fraction, end fraction, shading alpha, color).
gait_phases = [
    ('LR', 0.00, 0.12, 0.2, 'b'),
    ('MSt', 0.12, 0.31, 0.2, 'y'),
    ('TSt', 0.31, 0.50, 0.4, 'y'),
    ('PSw', 0.50, 0.62, 0.2, 'b'),
    ('ISw', 0.62, 0.75, 0.2, 'y'),
    ('MSw', 0.75, 0.87, 0.4, 'y'),
    ('TSw', 0.87, 1.00, 0.6, 'y'),
]

av_img = av
# Domain re-expressed as a percentage of the gait cycle.
# Fix: np.float was removed in NumPy 1.24 -- use the builtin float.
av_dom = 100 * np.arange(0, len(av_img)) / float(len(av_img))

fig = plt.figure(1)
plt.subplot(1, 1, 1)
# Fix: title typo "Velociteis" -> "Velocities".
plt.title("Angular Velocities for %s" % "Right Knee")
plt.ylabel("Degrees/Seconds")
plt.xlabel("Percentual Gait Cycle")
plt.axis([0, av_dom.max(), av_img.min(), av_img.max()])
curve_av, = plt.plot(av_dom, av_img, 'r')
# Shade each gait phase and label it near the top of the plot.
for label, start, end, alpha, color in gait_phases:
    plt.axvspan(xmin=av_dom.max() * start, xmax=av_dom.max() * end,
                ymin=0, ymax=1, alpha=alpha, color=color)
    plt.annotate(label, xy=(av_dom.max() * start + 5, av_img.max() * 0.90))
plt.show()
import matplotlib.pyplot as plt

# Gait-cycle phases as fractions of the full cycle (see the angles plot):
# (label, start fraction, end fraction, shading alpha, color).
gait_phases = [
    ('LR', 0.00, 0.12, 0.2, 'b'),
    ('MSt', 0.12, 0.31, 0.2, 'y'),
    ('TSt', 0.31, 0.50, 0.4, 'y'),
    ('PSw', 0.50, 0.62, 0.2, 'b'),
    ('ISw', 0.62, 0.75, 0.2, 'y'),
    ('MSw', 0.75, 0.87, 0.4, 'y'),
    ('TSw', 0.87, 1.00, 0.6, 'y'),
]

# Copy-paste fix: this cell plots accelerations, so keep the binding but
# note it was previously reusing the av_img/av_dom names.
av_img = aa
# Domain re-expressed as a percentage of the gait cycle.
# Fix: np.float was removed in NumPy 1.24 -- use the builtin float.
av_dom = 100 * np.arange(0, len(av_img)) / float(len(av_img))

fig = plt.figure(1)
plt.subplot(1, 1, 1)
plt.title("Angular Accelerations for %s" % "Angular Acceleration")
plt.ylabel("Degrees/Seconds^2")
plt.xlabel("Percentual Gait Cycle")
plt.axis([0, av_dom.max(), av_img.min(), av_img.max()])
curve_av, = plt.plot(av_dom, av_img, 'r')
# Shade each gait phase and label it near the top of the plot.
for label, start, end, alpha, color in gait_phases:
    plt.axvspan(xmin=av_dom.max() * start, xmax=av_dom.max() * end,
                ymin=0, ymax=1, alpha=alpha, color=color)
    plt.annotate(label, xy=(av_dom.max() * start + 5, av_img.max() * 0.90))
plt.show()
```
| github_jupyter |
# Neo4j
Neo4j is a graph database, and is useful when the *relationships* between items in our database is of interest.
- [Developer's manual](https://neo4j.com/docs/developer-manual/3.4/)
- [The Cypher query language](https://neo4j.com/docs/developer-manual/3.4/cypher/)
- [Cypher magic](https://ipython-cypher.readthedocs.io/en/latest/)
- [The Graph Data Science manual for neo4j](https://neo4j.com/docs/graph-data-science/current/)
Neo4j is not currently running on the OIT servers.
If you want to type along
- Use the neo4j sandbox https://neo4j.com/sandbox/
- Install neo4j desktop on your local computer
## Official driver
```bash
! python3 -m pip install --quiet neo4j
```
## Cypher
```
from neo4j import GraphDatabase
# Connect to a local Neo4j server over the Bolt protocol and open a session.
# NOTE(review): 'neo4j'/'neo4j' is the default credential pair; Neo4j usually
# forces a password change on first login -- confirm for your instance.
g = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "neo4j"))
s = g.session()
```
### Basic syntax
Syntax is meant to be visually evocative of a node `()` and an edge `-->`
- Nodes are `(variable:Label {key-value properties})`
- Edges are `-[variable:Label {key-value properties}]->`
### Create a database
This is a small subset of the full database. If you want to play with the full database, see [instructions](https://neo4j.com/developer/movie-database/)
Note: There are [other ways to create a graph database by importing data](https://neo4j.com/developer/guide-importing-data-and-etl/)
```
q = """
CREATE (TheMatrix:Movie {title:'The Matrix', released:1999, tagline:'Welcome to the Real World'})
CREATE (Keanu:Person {name:'Keanu Reeves', born:1964})
CREATE (Carrie:Person {name:'Carrie-Anne Moss', born:1967})
CREATE (Laurence:Person {name:'Laurence Fishburne', born:1961})
CREATE (Hugo:Person {name:'Hugo Weaving', born:1960})
CREATE (LillyW:Person {name:'Lilly Wachowski', born:1967})
CREATE (LanaW:Person {name:'Lana Wachowski', born:1965})
CREATE (JoelS:Person {name:'Joel Silver', born:1952})
CREATE
(Keanu)-[:ACTED_IN {roles:['Neo']}]->(TheMatrix),
(Carrie)-[:ACTED_IN {roles:['Trinity']}]->(TheMatrix),
(Laurence)-[:ACTED_IN {roles:['Morpheus']}]->(TheMatrix),
(Hugo)-[:ACTED_IN {roles:['Agent Smith']}]->(TheMatrix),
(LillyW)-[:DIRECTED]->(TheMatrix),
(LanaW)-[:DIRECTED]->(TheMatrix),
(JoelS)-[:PRODUCED]->(TheMatrix)
CREATE (Emil:Person {name:"Emil Eifrem", born:1978})
CREATE (Emil)-[:ACTED_IN {roles:["Emil"]}]->(TheMatrix)
CREATE (TheMatrixReloaded:Movie {title:'The Matrix Reloaded', released:2003, tagline:'Free your mind'})
CREATE
(Keanu)-[:ACTED_IN {roles:['Neo']}]->(TheMatrixReloaded),
(Carrie)-[:ACTED_IN {roles:['Trinity']}]->(TheMatrixReloaded),
(Laurence)-[:ACTED_IN {roles:['Morpheus']}]->(TheMatrixReloaded),
(Hugo)-[:ACTED_IN {roles:['Agent Smith']}]->(TheMatrixReloaded),
(LillyW)-[:DIRECTED]->(TheMatrixReloaded),
(LanaW)-[:DIRECTED]->(TheMatrixReloaded),
(JoelS)-[:PRODUCED]->(TheMatrixReloaded)
CREATE (TheMatrixRevolutions:Movie {title:'The Matrix Revolutions', released:2003, tagline:'Everything that has a beginning has an end'})
CREATE
(Keanu)-[:ACTED_IN {roles:['Neo']}]->(TheMatrixRevolutions),
(Carrie)-[:ACTED_IN {roles:['Trinity']}]->(TheMatrixRevolutions),
(Laurence)-[:ACTED_IN {roles:['Morpheus']}]->(TheMatrixRevolutions),
(Hugo)-[:ACTED_IN {roles:['Agent Smith']}]->(TheMatrixRevolutions),
(LillyW)-[:DIRECTED]->(TheMatrixRevolutions),
(LanaW)-[:DIRECTED]->(TheMatrixRevolutions),
(JoelS)-[:PRODUCED]->(TheMatrixRevolutions)
CREATE (TheDevilsAdvocate:Movie {title:"The Devil's Advocate", released:1997, tagline:'Evil has its winning ways'})
CREATE (Charlize:Person {name:'Charlize Theron', born:1975})
CREATE (Al:Person {name:'Al Pacino', born:1940})
CREATE (Taylor:Person {name:'Taylor Hackford', born:1944})
CREATE
(Keanu)-[:ACTED_IN {roles:['Kevin Lomax']}]->(TheDevilsAdvocate),
(Charlize)-[:ACTED_IN {roles:['Mary Ann Lomax']}]->(TheDevilsAdvocate),
(Al)-[:ACTED_IN {roles:['John Milton']}]->(TheDevilsAdvocate),
(Taylor)-[:DIRECTED]->(TheDevilsAdvocate)
CREATE (AFewGoodMen:Movie {title:"A Few Good Men", released:1992, tagline:"In the heart of the nation's capital, in a courthouse of the U.S. government, one man will stop at nothing to keep his honor, and one will stop at nothing to find the truth."})
CREATE (TomC:Person {name:'Tom Cruise', born:1962})
CREATE (JackN:Person {name:'Jack Nicholson', born:1937})
CREATE (DemiM:Person {name:'Demi Moore', born:1962})
CREATE (KevinB:Person {name:'Kevin Bacon', born:1958})
CREATE (KieferS:Person {name:'Kiefer Sutherland', born:1966})
CREATE (NoahW:Person {name:'Noah Wyle', born:1971})
CREATE (CubaG:Person {name:'Cuba Gooding Jr.', born:1968})
CREATE (KevinP:Person {name:'Kevin Pollak', born:1957})
CREATE (JTW:Person {name:'J.T. Walsh', born:1943})
CREATE (JamesM:Person {name:'James Marshall', born:1967})
CREATE (ChristopherG:Person {name:'Christopher Guest', born:1948})
CREATE (RobR:Person {name:'Rob Reiner', born:1947})
CREATE (AaronS:Person {name:'Aaron Sorkin', born:1961})
CREATE
(TomC)-[:ACTED_IN {roles:['Lt. Daniel Kaffee']}]->(AFewGoodMen),
(JackN)-[:ACTED_IN {roles:['Col. Nathan R. Jessup']}]->(AFewGoodMen),
(DemiM)-[:ACTED_IN {roles:['Lt. Cdr. JoAnne Galloway']}]->(AFewGoodMen),
(KevinB)-[:ACTED_IN {roles:['Capt. Jack Ross']}]->(AFewGoodMen),
(KieferS)-[:ACTED_IN {roles:['Lt. Jonathan Kendrick']}]->(AFewGoodMen),
(NoahW)-[:ACTED_IN {roles:['Cpl. Jeffrey Barnes']}]->(AFewGoodMen),
(CubaG)-[:ACTED_IN {roles:['Cpl. Carl Hammaker']}]->(AFewGoodMen),
(KevinP)-[:ACTED_IN {roles:['Lt. Sam Weinberg']}]->(AFewGoodMen),
(JTW)-[:ACTED_IN {roles:['Lt. Col. Matthew Andrew Markinson']}]->(AFewGoodMen),
(JamesM)-[:ACTED_IN {roles:['Pfc. Louden Downey']}]->(AFewGoodMen),
(ChristopherG)-[:ACTED_IN {roles:['Dr. Stone']}]->(AFewGoodMen),
(AaronS)-[:ACTED_IN {roles:['Man in Bar']}]->(AFewGoodMen),
(RobR)-[:DIRECTED]->(AFewGoodMen),
(AaronS)-[:WROTE]->(AFewGoodMen)
CREATE (TopGun:Movie {title:"Top Gun", released:1986, tagline:'I feel the need, the need for speed.'})
CREATE (KellyM:Person {name:'Kelly McGillis', born:1957})
CREATE (ValK:Person {name:'Val Kilmer', born:1959})
CREATE (AnthonyE:Person {name:'Anthony Edwards', born:1962})
CREATE (TomS:Person {name:'Tom Skerritt', born:1933})
CREATE (MegR:Person {name:'Meg Ryan', born:1961})
CREATE (TonyS:Person {name:'Tony Scott', born:1944})
CREATE (JimC:Person {name:'Jim Cash', born:1941})
CREATE
(TomC)-[:ACTED_IN {roles:['Maverick']}]->(TopGun),
(KellyM)-[:ACTED_IN {roles:['Charlie']}]->(TopGun),
(ValK)-[:ACTED_IN {roles:['Iceman']}]->(TopGun),
(AnthonyE)-[:ACTED_IN {roles:['Goose']}]->(TopGun),
(TomS)-[:ACTED_IN {roles:['Viper']}]->(TopGun),
(MegR)-[:ACTED_IN {roles:['Carole']}]->(TopGun),
(TonyS)-[:DIRECTED]->(TopGun),
(JimC)-[:WROTE]->(TopGun)
CREATE (JerryMaguire:Movie {title:'Jerry Maguire', released:2000, tagline:'The rest of his life begins now.'})
CREATE (ReneeZ:Person {name:'Renee Zellweger', born:1969})
CREATE (KellyP:Person {name:'Kelly Preston', born:1962})
CREATE (JerryO:Person {name:"Jerry O'Connell", born:1974})
CREATE (JayM:Person {name:'Jay Mohr', born:1970})
CREATE (BonnieH:Person {name:'Bonnie Hunt', born:1961})
CREATE (ReginaK:Person {name:'Regina King', born:1971})
CREATE (JonathanL:Person {name:'Jonathan Lipnicki', born:1996})
CREATE (CameronC:Person {name:'Cameron Crowe', born:1957})
CREATE
(TomC)-[:ACTED_IN {roles:['Jerry Maguire']}]->(JerryMaguire),
(CubaG)-[:ACTED_IN {roles:['Rod Tidwell']}]->(JerryMaguire),
(ReneeZ)-[:ACTED_IN {roles:['Dorothy Boyd']}]->(JerryMaguire),
(KellyP)-[:ACTED_IN {roles:['Avery Bishop']}]->(JerryMaguire),
(JerryO)-[:ACTED_IN {roles:['Frank Cushman']}]->(JerryMaguire),
(JayM)-[:ACTED_IN {roles:['Bob Sugar']}]->(JerryMaguire),
(BonnieH)-[:ACTED_IN {roles:['Laurel Boyd']}]->(JerryMaguire),
(ReginaK)-[:ACTED_IN {roles:['Marcee Tidwell']}]->(JerryMaguire),
(JonathanL)-[:ACTED_IN {roles:['Ray Boyd']}]->(JerryMaguire),
(CameronC)-[:DIRECTED]->(JerryMaguire),
(CameronC)-[:PRODUCED]->(JerryMaguire),
(CameronC)-[:WROTE]->(JerryMaguire)
CREATE (StandByMe:Movie {title:"Stand By Me", released:1986, tagline:"For some, it's the last real taste of innocence, and the first real taste of life. But for everyone, it's the time that memories are made of."})
CREATE (RiverP:Person {name:'River Phoenix', born:1970})
CREATE (CoreyF:Person {name:'Corey Feldman', born:1971})
CREATE (WilW:Person {name:'Wil Wheaton', born:1972})
CREATE (JohnC:Person {name:'John Cusack', born:1966})
CREATE (MarshallB:Person {name:'Marshall Bell', born:1942})
CREATE
(WilW)-[:ACTED_IN {roles:['Gordie Lachance']}]->(StandByMe),
(RiverP)-[:ACTED_IN {roles:['Chris Chambers']}]->(StandByMe),
(JerryO)-[:ACTED_IN {roles:['Vern Tessio']}]->(StandByMe),
(CoreyF)-[:ACTED_IN {roles:['Teddy Duchamp']}]->(StandByMe),
(JohnC)-[:ACTED_IN {roles:['Denny Lachance']}]->(StandByMe),
(KieferS)-[:ACTED_IN {roles:['Ace Merrill']}]->(StandByMe),
(MarshallB)-[:ACTED_IN {roles:['Mr. Lachance']}]->(StandByMe),
(RobR)-[:DIRECTED]->(StandByMe)
CREATE (AsGoodAsItGets:Movie {title:'As Good as It Gets', released:1997, tagline:'A comedy from the heart that goes for the throat.'})
CREATE (HelenH:Person {name:'Helen Hunt', born:1963})
CREATE (GregK:Person {name:'Greg Kinnear', born:1963})
CREATE (JamesB:Person {name:'James L. Brooks', born:1940})
CREATE
(JackN)-[:ACTED_IN {roles:['Melvin Udall']}]->(AsGoodAsItGets),
(HelenH)-[:ACTED_IN {roles:['Carol Connelly']}]->(AsGoodAsItGets),
(GregK)-[:ACTED_IN {roles:['Simon Bishop']}]->(AsGoodAsItGets),
(CubaG)-[:ACTED_IN {roles:['Frank Sachs']}]->(AsGoodAsItGets),
(JamesB)-[:DIRECTED]->(AsGoodAsItGets)
CREATE (WhatDreamsMayCome:Movie {title:'What Dreams May Come', released:1998, tagline:'After life there is more. The end is just the beginning.'})
CREATE (AnnabellaS:Person {name:'Annabella Sciorra', born:1960})
CREATE (MaxS:Person {name:'Max von Sydow', born:1929})
CREATE (WernerH:Person {name:'Werner Herzog', born:1942})
CREATE (Robin:Person {name:'Robin Williams', born:1951})
CREATE (VincentW:Person {name:'Vincent Ward', born:1956})
CREATE
(Robin)-[:ACTED_IN {roles:['Chris Nielsen']}]->(WhatDreamsMayCome),
(CubaG)-[:ACTED_IN {roles:['Albert Lewis']}]->(WhatDreamsMayCome),
(AnnabellaS)-[:ACTED_IN {roles:['Annie Collins-Nielsen']}]->(WhatDreamsMayCome),
(MaxS)-[:ACTED_IN {roles:['The Tracker']}]->(WhatDreamsMayCome),
(WernerH)-[:ACTED_IN {roles:['The Face']}]->(WhatDreamsMayCome),
(VincentW)-[:DIRECTED]->(WhatDreamsMayCome)
CREATE (SnowFallingonCedars:Movie {title:'Snow Falling on Cedars', released:1999, tagline:'First loves last. Forever.'})
CREATE (EthanH:Person {name:'Ethan Hawke', born:1970})
CREATE (RickY:Person {name:'Rick Yune', born:1971})
CREATE (JamesC:Person {name:'James Cromwell', born:1940})
CREATE (ScottH:Person {name:'Scott Hicks', born:1953})
CREATE
(EthanH)-[:ACTED_IN {roles:['Ishmael Chambers']}]->(SnowFallingonCedars),
(RickY)-[:ACTED_IN {roles:['Kazuo Miyamoto']}]->(SnowFallingonCedars),
(MaxS)-[:ACTED_IN {roles:['Nels Gudmundsson']}]->(SnowFallingonCedars),
(JamesC)-[:ACTED_IN {roles:['Judge Fielding']}]->(SnowFallingonCedars),
(ScottH)-[:DIRECTED]->(SnowFallingonCedars)
CREATE (YouveGotMail:Movie {title:"You've Got Mail", released:1998, tagline:'At odds in life... in love on-line.'})
CREATE (ParkerP:Person {name:'Parker Posey', born:1968})
CREATE (DaveC:Person {name:'Dave Chappelle', born:1973})
CREATE (SteveZ:Person {name:'Steve Zahn', born:1967})
CREATE (TomH:Person {name:'Tom Hanks', born:1956})
CREATE (NoraE:Person {name:'Nora Ephron', born:1941})
CREATE
(TomH)-[:ACTED_IN {roles:['Joe Fox']}]->(YouveGotMail),
(MegR)-[:ACTED_IN {roles:['Kathleen Kelly']}]->(YouveGotMail),
(GregK)-[:ACTED_IN {roles:['Frank Navasky']}]->(YouveGotMail),
(ParkerP)-[:ACTED_IN {roles:['Patricia Eden']}]->(YouveGotMail),
(DaveC)-[:ACTED_IN {roles:['Kevin Jackson']}]->(YouveGotMail),
(SteveZ)-[:ACTED_IN {roles:['George Pappas']}]->(YouveGotMail),
(NoraE)-[:DIRECTED]->(YouveGotMail)
CREATE (SleeplessInSeattle:Movie {title:'Sleepless in Seattle', released:1993, tagline:'What if someone you never met, someone you never saw, someone you never knew was the only someone for you?'})
CREATE (RitaW:Person {name:'Rita Wilson', born:1956})
CREATE (BillPull:Person {name:'Bill Pullman', born:1953})
CREATE (VictorG:Person {name:'Victor Garber', born:1949})
CREATE (RosieO:Person {name:"Rosie O'Donnell", born:1962})
CREATE
(TomH)-[:ACTED_IN {roles:['Sam Baldwin']}]->(SleeplessInSeattle),
(MegR)-[:ACTED_IN {roles:['Annie Reed']}]->(SleeplessInSeattle),
(RitaW)-[:ACTED_IN {roles:['Suzy']}]->(SleeplessInSeattle),
(BillPull)-[:ACTED_IN {roles:['Walter']}]->(SleeplessInSeattle),
(VictorG)-[:ACTED_IN {roles:['Greg']}]->(SleeplessInSeattle),
(RosieO)-[:ACTED_IN {roles:['Becky']}]->(SleeplessInSeattle),
(NoraE)-[:DIRECTED]->(SleeplessInSeattle)
CREATE (JoeVersustheVolcano:Movie {title:'Joe Versus the Volcano', released:1990, tagline:'A story of love, lava and burning desire.'})
CREATE (JohnS:Person {name:'John Patrick Stanley', born:1950})
CREATE (Nathan:Person {name:'Nathan Lane', born:1956})
CREATE
(TomH)-[:ACTED_IN {roles:['Joe Banks']}]->(JoeVersustheVolcano),
(MegR)-[:ACTED_IN {roles:['DeDe', 'Angelica Graynamore', 'Patricia Graynamore']}]->(JoeVersustheVolcano),
(Nathan)-[:ACTED_IN {roles:['Baw']}]->(JoeVersustheVolcano),
(JohnS)-[:DIRECTED]->(JoeVersustheVolcano)
CREATE (WhenHarryMetSally:Movie {title:'When Harry Met Sally', released:1998, tagline:'At odds in life... in love on-line.'})
CREATE (BillyC:Person {name:'Billy Crystal', born:1948})
CREATE (CarrieF:Person {name:'Carrie Fisher', born:1956})
CREATE (BrunoK:Person {name:'Bruno Kirby', born:1949})
CREATE
(BillyC)-[:ACTED_IN {roles:['Harry Burns']}]->(WhenHarryMetSally),
(MegR)-[:ACTED_IN {roles:['Sally Albright']}]->(WhenHarryMetSally),
(CarrieF)-[:ACTED_IN {roles:['Marie']}]->(WhenHarryMetSally),
(BrunoK)-[:ACTED_IN {roles:['Jess']}]->(WhenHarryMetSally),
(RobR)-[:DIRECTED]->(WhenHarryMetSally),
(RobR)-[:PRODUCED]->(WhenHarryMetSally),
(NoraE)-[:PRODUCED]->(WhenHarryMetSally),
(NoraE)-[:WROTE]->(WhenHarryMetSally)
CREATE (ThatThingYouDo:Movie {title:'That Thing You Do', released:1996, tagline:'In every life there comes a time when that thing you dream becomes that thing you do'})
CREATE (LivT:Person {name:'Liv Tyler', born:1977})
CREATE
(TomH)-[:ACTED_IN {roles:['Mr. White']}]->(ThatThingYouDo),
(LivT)-[:ACTED_IN {roles:['Faye Dolan']}]->(ThatThingYouDo),
(Charlize)-[:ACTED_IN {roles:['Tina']}]->(ThatThingYouDo),
(TomH)-[:DIRECTED]->(ThatThingYouDo)
CREATE (TheReplacements:Movie {title:'The Replacements', released:2000, tagline:'Pain heals, Chicks dig scars... Glory lasts forever'})
CREATE (Brooke:Person {name:'Brooke Langton', born:1970})
CREATE (Gene:Person {name:'Gene Hackman', born:1930})
CREATE (Orlando:Person {name:'Orlando Jones', born:1968})
CREATE (Howard:Person {name:'Howard Deutch', born:1950})
CREATE
(Keanu)-[:ACTED_IN {roles:['Shane Falco']}]->(TheReplacements),
(Brooke)-[:ACTED_IN {roles:['Annabelle Farrell']}]->(TheReplacements),
(Gene)-[:ACTED_IN {roles:['Jimmy McGinty']}]->(TheReplacements),
(Orlando)-[:ACTED_IN {roles:['Clifford Franklin']}]->(TheReplacements),
(Howard)-[:DIRECTED]->(TheReplacements)
CREATE (RescueDawn:Movie {title:'RescueDawn', released:2006, tagline:"Based on the extraordinary true story of one man's fight for freedom"})
CREATE (ChristianB:Person {name:'Christian Bale', born:1974})
CREATE (ZachG:Person {name:'Zach Grenier', born:1954})
CREATE
(MarshallB)-[:ACTED_IN {roles:['Admiral']}]->(RescueDawn),
(ChristianB)-[:ACTED_IN {roles:['Dieter Dengler']}]->(RescueDawn),
(ZachG)-[:ACTED_IN {roles:['Squad Leader']}]->(RescueDawn),
(SteveZ)-[:ACTED_IN {roles:['Duane']}]->(RescueDawn),
(WernerH)-[:DIRECTED]->(RescueDawn)
CREATE (TheBirdcage:Movie {title:'The Birdcage', released:1996, tagline:'Come as you are'})
CREATE (MikeN:Person {name:'Mike Nichols', born:1931})
CREATE
(Robin)-[:ACTED_IN {roles:['Armand Goldman']}]->(TheBirdcage),
(Nathan)-[:ACTED_IN {roles:['Albert Goldman']}]->(TheBirdcage),
(Gene)-[:ACTED_IN {roles:['Sen. Kevin Keeley']}]->(TheBirdcage),
(MikeN)-[:DIRECTED]->(TheBirdcage)
CREATE (Unforgiven:Movie {title:'Unforgiven', released:1992, tagline:"It's a hell of a thing, killing a man"})
CREATE (RichardH:Person {name:'Richard Harris', born:1930})
CREATE (ClintE:Person {name:'Clint Eastwood', born:1930})
CREATE
(RichardH)-[:ACTED_IN {roles:['English Bob']}]->(Unforgiven),
(ClintE)-[:ACTED_IN {roles:['Bill Munny']}]->(Unforgiven),
(Gene)-[:ACTED_IN {roles:['Little Bill Daggett']}]->(Unforgiven),
(ClintE)-[:DIRECTED]->(Unforgiven)
CREATE (JohnnyMnemonic:Movie {title:'Johnny Mnemonic', released:1995, tagline:'The hottest data on earth. In the coolest head in town'})
CREATE (Takeshi:Person {name:'Takeshi Kitano', born:1947})
CREATE (Dina:Person {name:'Dina Meyer', born:1968})
CREATE (IceT:Person {name:'Ice-T', born:1958})
CREATE (RobertL:Person {name:'Robert Longo', born:1953})
CREATE
(Keanu)-[:ACTED_IN {roles:['Johnny Mnemonic']}]->(JohnnyMnemonic),
(Takeshi)-[:ACTED_IN {roles:['Takahashi']}]->(JohnnyMnemonic),
(Dina)-[:ACTED_IN {roles:['Jane']}]->(JohnnyMnemonic),
(IceT)-[:ACTED_IN {roles:['J-Bone']}]->(JohnnyMnemonic),
(RobertL)-[:DIRECTED]->(JohnnyMnemonic)
CREATE (CloudAtlas:Movie {title:'Cloud Atlas', released:2012, tagline:'Everything is connected'})
CREATE (HalleB:Person {name:'Halle Berry', born:1966})
CREATE (JimB:Person {name:'Jim Broadbent', born:1949})
CREATE (TomT:Person {name:'Tom Tykwer', born:1965})
CREATE (DavidMitchell:Person {name:'David Mitchell', born:1969})
CREATE (StefanArndt:Person {name:'Stefan Arndt', born:1961})
CREATE
(TomH)-[:ACTED_IN {roles:['Zachry', 'Dr. Henry Goose', 'Isaac Sachs', 'Dermot Hoggins']}]->(CloudAtlas),
(Hugo)-[:ACTED_IN {roles:['Bill Smoke', 'Haskell Moore', 'Tadeusz Kesselring', 'Nurse Noakes', 'Boardman Mephi', 'Old Georgie']}]->(CloudAtlas),
(HalleB)-[:ACTED_IN {roles:['Luisa Rey', 'Jocasta Ayrs', 'Ovid', 'Meronym']}]->(CloudAtlas),
(JimB)-[:ACTED_IN {roles:['Vyvyan Ayrs', 'Captain Molyneux', 'Timothy Cavendish']}]->(CloudAtlas),
(TomT)-[:DIRECTED]->(CloudAtlas),
(LillyW)-[:DIRECTED]->(CloudAtlas),
(LanaW)-[:DIRECTED]->(CloudAtlas),
(DavidMitchell)-[:WROTE]->(CloudAtlas),
(StefanArndt)-[:PRODUCED]->(CloudAtlas)
CREATE (TheDaVinciCode:Movie {title:'The Da Vinci Code', released:2006, tagline:'Break The Codes'})
CREATE (IanM:Person {name:'Ian McKellen', born:1939})
CREATE (AudreyT:Person {name:'Audrey Tautou', born:1976})
CREATE (PaulB:Person {name:'Paul Bettany', born:1971})
CREATE (RonH:Person {name:'Ron Howard', born:1954})
CREATE
(TomH)-[:ACTED_IN {roles:['Dr. Robert Langdon']}]->(TheDaVinciCode),
(IanM)-[:ACTED_IN {roles:['Sir Leight Teabing']}]->(TheDaVinciCode),
(AudreyT)-[:ACTED_IN {roles:['Sophie Neveu']}]->(TheDaVinciCode),
(PaulB)-[:ACTED_IN {roles:['Silas']}]->(TheDaVinciCode),
(RonH)-[:DIRECTED]->(TheDaVinciCode)
CREATE (VforVendetta:Movie {title:'V for Vendetta', released:2006, tagline:'Freedom! Forever!'})
CREATE (NatalieP:Person {name:'Natalie Portman', born:1981})
CREATE (StephenR:Person {name:'Stephen Rea', born:1946})
CREATE (JohnH:Person {name:'John Hurt', born:1940})
CREATE (BenM:Person {name: 'Ben Miles', born:1967})
CREATE
(Hugo)-[:ACTED_IN {roles:['V']}]->(VforVendetta),
(NatalieP)-[:ACTED_IN {roles:['Evey Hammond']}]->(VforVendetta),
(StephenR)-[:ACTED_IN {roles:['Eric Finch']}]->(VforVendetta),
(JohnH)-[:ACTED_IN {roles:['High Chancellor Adam Sutler']}]->(VforVendetta),
(BenM)-[:ACTED_IN {roles:['Dascomb']}]->(VforVendetta),
(JamesM)-[:DIRECTED]->(VforVendetta),
(LillyW)-[:PRODUCED]->(VforVendetta),
(LanaW)-[:PRODUCED]->(VforVendetta),
(JoelS)-[:PRODUCED]->(VforVendetta),
(LillyW)-[:WROTE]->(VforVendetta),
(LanaW)-[:WROTE]->(VforVendetta)
CREATE (SpeedRacer:Movie {title:'Speed Racer', released:2008, tagline:'Speed has no limits'})
CREATE (EmileH:Person {name:'Emile Hirsch', born:1985})
CREATE (JohnG:Person {name:'John Goodman', born:1960})
CREATE (SusanS:Person {name:'Susan Sarandon', born:1946})
CREATE (MatthewF:Person {name:'Matthew Fox', born:1966})
CREATE (ChristinaR:Person {name:'Christina Ricci', born:1980})
CREATE (Rain:Person {name:'Rain', born:1982})
CREATE
(EmileH)-[:ACTED_IN {roles:['Speed Racer']}]->(SpeedRacer),
(JohnG)-[:ACTED_IN {roles:['Pops']}]->(SpeedRacer),
(SusanS)-[:ACTED_IN {roles:['Mom']}]->(SpeedRacer),
(MatthewF)-[:ACTED_IN {roles:['Racer X']}]->(SpeedRacer),
(ChristinaR)-[:ACTED_IN {roles:['Trixie']}]->(SpeedRacer),
(Rain)-[:ACTED_IN {roles:['Taejo Togokahn']}]->(SpeedRacer),
(BenM)-[:ACTED_IN {roles:['Cass Jones']}]->(SpeedRacer),
(LillyW)-[:DIRECTED]->(SpeedRacer),
(LanaW)-[:DIRECTED]->(SpeedRacer),
(LillyW)-[:WROTE]->(SpeedRacer),
(LanaW)-[:WROTE]->(SpeedRacer),
(JoelS)-[:PRODUCED]->(SpeedRacer)
CREATE (NinjaAssassin:Movie {title:'Ninja Assassin', released:2009, tagline:'Prepare to enter a secret world of assassins'})
CREATE (NaomieH:Person {name:'Naomie Harris'})
CREATE
(Rain)-[:ACTED_IN {roles:['Raizo']}]->(NinjaAssassin),
(NaomieH)-[:ACTED_IN {roles:['Mika Coretti']}]->(NinjaAssassin),
(RickY)-[:ACTED_IN {roles:['Takeshi']}]->(NinjaAssassin),
(BenM)-[:ACTED_IN {roles:['Ryan Maslow']}]->(NinjaAssassin),
(JamesM)-[:DIRECTED]->(NinjaAssassin),
(LillyW)-[:PRODUCED]->(NinjaAssassin),
(LanaW)-[:PRODUCED]->(NinjaAssassin),
(JoelS)-[:PRODUCED]->(NinjaAssassin)
CREATE (TheGreenMile:Movie {title:'The Green Mile', released:1999, tagline:"Walk a mile you'll never forget."})
CREATE (MichaelD:Person {name:'Michael Clarke Duncan', born:1957})
CREATE (DavidM:Person {name:'David Morse', born:1953})
CREATE (SamR:Person {name:'Sam Rockwell', born:1968})
CREATE (GaryS:Person {name:'Gary Sinise', born:1955})
CREATE (PatriciaC:Person {name:'Patricia Clarkson', born:1959})
CREATE (FrankD:Person {name:'Frank Darabont', born:1959})
CREATE
(TomH)-[:ACTED_IN {roles:['Paul Edgecomb']}]->(TheGreenMile),
(MichaelD)-[:ACTED_IN {roles:['John Coffey']}]->(TheGreenMile),
(DavidM)-[:ACTED_IN {roles:['Brutus "Brutal" Howell']}]->(TheGreenMile),
(BonnieH)-[:ACTED_IN {roles:['Jan Edgecomb']}]->(TheGreenMile),
(JamesC)-[:ACTED_IN {roles:['Warden Hal Moores']}]->(TheGreenMile),
(SamR)-[:ACTED_IN {roles:['"Wild Bill" Wharton']}]->(TheGreenMile),
(GaryS)-[:ACTED_IN {roles:['Burt Hammersmith']}]->(TheGreenMile),
(PatriciaC)-[:ACTED_IN {roles:['Melinda Moores']}]->(TheGreenMile),
(FrankD)-[:DIRECTED]->(TheGreenMile)
CREATE (FrostNixon:Movie {title:'Frost/Nixon', released:2008, tagline:'400 million people were waiting for the truth.'})
CREATE (FrankL:Person {name:'Frank Langella', born:1938})
CREATE (MichaelS:Person {name:'Michael Sheen', born:1969})
CREATE (OliverP:Person {name:'Oliver Platt', born:1960})
CREATE
(FrankL)-[:ACTED_IN {roles:['Richard Nixon']}]->(FrostNixon),
(MichaelS)-[:ACTED_IN {roles:['David Frost']}]->(FrostNixon),
(KevinB)-[:ACTED_IN {roles:['Jack Brennan']}]->(FrostNixon),
(OliverP)-[:ACTED_IN {roles:['Bob Zelnick']}]->(FrostNixon),
(SamR)-[:ACTED_IN {roles:['James Reston, Jr.']}]->(FrostNixon),
(RonH)-[:DIRECTED]->(FrostNixon)
CREATE (Hoffa:Movie {title:'Hoffa', released:1992, tagline:"He didn't want law. He wanted justice."})
CREATE (DannyD:Person {name:'Danny DeVito', born:1944})
CREATE (JohnR:Person {name:'John C. Reilly', born:1965})
CREATE
(JackN)-[:ACTED_IN {roles:['Hoffa']}]->(Hoffa),
(DannyD)-[:ACTED_IN {roles:['Robert "Bobby" Ciaro']}]->(Hoffa),
(JTW)-[:ACTED_IN {roles:['Frank Fitzsimmons']}]->(Hoffa),
(JohnR)-[:ACTED_IN {roles:['Peter "Pete" Connelly']}]->(Hoffa),
(DannyD)-[:DIRECTED]->(Hoffa)
CREATE (Apollo13:Movie {title:'Apollo 13', released:1995, tagline:'Houston, we have a problem.'})
CREATE (EdH:Person {name:'Ed Harris', born:1950})
CREATE (BillPax:Person {name:'Bill Paxton', born:1955})
CREATE
(TomH)-[:ACTED_IN {roles:['Jim Lovell']}]->(Apollo13),
(KevinB)-[:ACTED_IN {roles:['Jack Swigert']}]->(Apollo13),
(EdH)-[:ACTED_IN {roles:['Gene Kranz']}]->(Apollo13),
(BillPax)-[:ACTED_IN {roles:['Fred Haise']}]->(Apollo13),
(GaryS)-[:ACTED_IN {roles:['Ken Mattingly']}]->(Apollo13),
(RonH)-[:DIRECTED]->(Apollo13)
CREATE (Twister:Movie {title:'Twister', released:1996, tagline:"Don't Breathe. Don't Look Back."})
CREATE (PhilipH:Person {name:'Philip Seymour Hoffman', born:1967})
CREATE (JanB:Person {name:'Jan de Bont', born:1943})
CREATE
(BillPax)-[:ACTED_IN {roles:['Bill Harding']}]->(Twister),
(HelenH)-[:ACTED_IN {roles:['Dr. Jo Harding']}]->(Twister),
(ZachG)-[:ACTED_IN {roles:['Eddie']}]->(Twister),
(PhilipH)-[:ACTED_IN {roles:['Dustin "Dusty" Davis']}]->(Twister),
(JanB)-[:DIRECTED]->(Twister)
CREATE (CastAway:Movie {title:'Cast Away', released:2000, tagline:'At the edge of the world, his journey begins.'})
CREATE (RobertZ:Person {name:'Robert Zemeckis', born:1951})
CREATE
(TomH)-[:ACTED_IN {roles:['Chuck Noland']}]->(CastAway),
(HelenH)-[:ACTED_IN {roles:['Kelly Frears']}]->(CastAway),
(RobertZ)-[:DIRECTED]->(CastAway)
CREATE (OneFlewOvertheCuckoosNest:Movie {title:"One Flew Over the Cuckoo's Nest", released:1975, tagline:"If he's crazy, what does that make you?"})
CREATE (MilosF:Person {name:'Milos Forman', born:1932})
CREATE
(JackN)-[:ACTED_IN {roles:['Randle McMurphy']}]->(OneFlewOvertheCuckoosNest),
(DannyD)-[:ACTED_IN {roles:['Martini']}]->(OneFlewOvertheCuckoosNest),
(MilosF)-[:DIRECTED]->(OneFlewOvertheCuckoosNest)
CREATE (SomethingsGottaGive:Movie {title:"Something's Gotta Give", released:2003})
CREATE (DianeK:Person {name:'Diane Keaton', born:1946})
CREATE (NancyM:Person {name:'Nancy Meyers', born:1949})
CREATE
(JackN)-[:ACTED_IN {roles:['Harry Sanborn']}]->(SomethingsGottaGive),
(DianeK)-[:ACTED_IN {roles:['Erica Barry']}]->(SomethingsGottaGive),
(Keanu)-[:ACTED_IN {roles:['Julian Mercer']}]->(SomethingsGottaGive),
(NancyM)-[:DIRECTED]->(SomethingsGottaGive),
(NancyM)-[:PRODUCED]->(SomethingsGottaGive),
(NancyM)-[:WROTE]->(SomethingsGottaGive)
CREATE (BicentennialMan:Movie {title:'Bicentennial Man', released:1999, tagline:"One robot's 200 year journey to become an ordinary man."})
CREATE (ChrisC:Person {name:'Chris Columbus', born:1958})
CREATE
(Robin)-[:ACTED_IN {roles:['Andrew Marin']}]->(BicentennialMan),
(OliverP)-[:ACTED_IN {roles:['Rupert Burns']}]->(BicentennialMan),
(ChrisC)-[:DIRECTED]->(BicentennialMan)
CREATE (CharlieWilsonsWar:Movie {title:"Charlie Wilson's War", released:2007, tagline:"A stiff drink. A little mascara. A lot of nerve. Who said they couldn't bring down the Soviet empire."})
CREATE (JuliaR:Person {name:'Julia Roberts', born:1967})
CREATE
(TomH)-[:ACTED_IN {roles:['Rep. Charlie Wilson']}]->(CharlieWilsonsWar),
(JuliaR)-[:ACTED_IN {roles:['Joanne Herring']}]->(CharlieWilsonsWar),
(PhilipH)-[:ACTED_IN {roles:['Gust Avrakotos']}]->(CharlieWilsonsWar),
(MikeN)-[:DIRECTED]->(CharlieWilsonsWar)
CREATE (ThePolarExpress:Movie {title:'The Polar Express', released:2004, tagline:'This Holiday Season… Believe'})
CREATE
(TomH)-[:ACTED_IN {roles:['Hero Boy', 'Father', 'Conductor', 'Hobo', 'Scrooge', 'Santa Claus']}]->(ThePolarExpress),
(RobertZ)-[:DIRECTED]->(ThePolarExpress)
CREATE (ALeagueofTheirOwn:Movie {title:'A League of Their Own', released:1992, tagline:'Once in a lifetime you get a chance to do something different.'})
CREATE (Madonna:Person {name:'Madonna', born:1954})
CREATE (GeenaD:Person {name:'Geena Davis', born:1956})
CREATE (LoriP:Person {name:'Lori Petty', born:1963})
CREATE (PennyM:Person {name:'Penny Marshall', born:1943})
CREATE
(TomH)-[:ACTED_IN {roles:['Jimmy Dugan']}]->(ALeagueofTheirOwn),
(GeenaD)-[:ACTED_IN {roles:['Dottie Hinson']}]->(ALeagueofTheirOwn),
(LoriP)-[:ACTED_IN {roles:['Kit Keller']}]->(ALeagueofTheirOwn),
(RosieO)-[:ACTED_IN {roles:['Doris Murphy']}]->(ALeagueofTheirOwn),
(Madonna)-[:ACTED_IN {roles:['"All the Way" Mae Mordabito']}]->(ALeagueofTheirOwn),
(BillPax)-[:ACTED_IN {roles:['Bob Hinson']}]->(ALeagueofTheirOwn),
(PennyM)-[:DIRECTED]->(ALeagueofTheirOwn)
CREATE (PaulBlythe:Person {name:'Paul Blythe'})
CREATE (AngelaScope:Person {name:'Angela Scope'})
CREATE (JessicaThompson:Person {name:'Jessica Thompson'})
CREATE (JamesThompson:Person {name:'James Thompson'})
CREATE
(JamesThompson)-[:FOLLOWS]->(JessicaThompson),
(AngelaScope)-[:FOLLOWS]->(JessicaThompson),
(PaulBlythe)-[:FOLLOWS]->(AngelaScope)
CREATE
(JessicaThompson)-[:REVIEWED {summary:'An amazing journey', rating:95}]->(CloudAtlas),
(JessicaThompson)-[:REVIEWED {summary:'Silly, but fun', rating:65}]->(TheReplacements),
(JamesThompson)-[:REVIEWED {summary:'The coolest football movie ever', rating:100}]->(TheReplacements),
(AngelaScope)-[:REVIEWED {summary:'Pretty funny at times', rating:62}]->(TheReplacements),
(JessicaThompson)-[:REVIEWED {summary:'Dark, but compelling', rating:85}]->(Unforgiven),
(JessicaThompson)-[:REVIEWED {summary:"Slapstick redeemed only by the Robin Williams and Gene Hackman's stellar performances", rating:45}]->(TheBirdcage),
(JessicaThompson)-[:REVIEWED {summary:'A solid romp', rating:68}]->(TheDaVinciCode),
(JamesThompson)-[:REVIEWED {summary:'Fun, but a little far fetched', rating:65}]->(TheDaVinciCode),
(JessicaThompson)-[:REVIEWED {summary:'You had me at Jerry', rating:92}]->(JerryMaguire)
;"""
s.run(q);
```
## Queries
### Find all actors
```
q = """
MATCH (a:Person)
RETURN DISTINCT a.name AS Actor
ORDER BY a.name
LIMIT 5
"""
s.run(q).values()
```
### Find all movies
```
q = """
MATCH (m:Movie)
RETURN DISTINCT m.title AS Movie
ORDER BY m.title
LIMIT 5
"""
s.run(q).values()
```
### Find movies that Keanu Reeves acted in
```
q = """
MATCH (p:Person) -[:ACTED_IN]-> (m:Movie)
WHERE p.name = 'Keanu Reeves'
RETURN DISTINCT m.title
"""
s.run(q).values()
```
### Find movies that Keanu Reeves and Al Pacino both acted in
```
q = """
MATCH (p1:Person) -[:ACTED_IN]-> (m:Movie) <-[:ACTED_IN]- (p2:Person)
WHERE p1.name = 'Keanu Reeves' AND p2.name = 'Al Pacino'
RETURN DISTINCT m.title
"""
s.run(q).values()
```
### More efficient to use attributes
```
q = """
MATCH (p1:Person {name: 'Keanu Reeves'}) -[:ACTED_IN]-> (m:Movie) <-[:ACTED_IN]- (p2:Person {name: 'Al Pacino'})
RETURN DISTINCT m.title
"""
s.run(q).values()
```
### Find people who acted in Johnny Mnemonic
```
q = """
MATCH (m:Movie {title: 'Johnny Mnemonic'}) <-[:ACTED_IN]- (p:Person)
RETURN p.name
"""
s.run(q).values()
```
### Find oldest actor
```
q = """
MATCH (p:Person) -[:ACTED_IN]-> (:Movie)
WHERE EXISTS(p.born)
RETURN DISTINCT p.name, p.born
ORDER BY p.born ASC
limit 1
"""
s.run(q).values()
```
### Find youngest actor
```
q = """
MATCH (p:Person) -[:ACTED_IN]-> (:Movie)
WHERE EXISTS(p.born)
RETURN DISTINCT p.name, p.born
ORDER BY p.born DESC
limit 1
"""
s.run(q).values()
```
### Find shortest path between oldest and youngest actor
```
q = """
MATCH p=shortestPath(
(:Person {name:"Max von Sydow"})-[*]-(:Person {name:"Jonathan Lipnicki"})
)
RETURN p
"""
s.run(q).values()
```
### Store result in Python variable
```
q = """
MATCH (p:Person) -[:ACTED_IN]-> (m:Movie)
WHERE p.name = 'Keanu Reeves'
RETURN DISTINCT m.title
"""
vals = s.run(q).values()
len(vals)
vals
```
## Delete all nodes and relationships
```
q = """
MATCH (n)
DETACH DELETE n
"""
s.run(q).values()
```
## Clean up
```
s.close()
g.close()
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Chapter-1---Exploring-Tick,-Volume,-DV-Bars" data-toc-modified-id="Chapter-1---Exploring-Tick,-Volume,-DV-Bars-1" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">1 </span>Chapter 1 - Exploring Tick, Volume, DV Bars</a></span><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1.1" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">1.1 </span>Introduction</a></span></li><li><span><a href="#Read-and-Clean-Data" data-toc-modified-id="Read-and-Clean-Data-1.2" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">1.2 </span>Read and Clean Data</a></span></li><li><span><a href="#Remove-Obvious-Price-Errors-in-Tick-Data" data-toc-modified-id="Remove-Obvious-Price-Errors-in-Tick-Data-1.3" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">1.3 </span>Remove Obvious Price Errors in Tick Data</a></span></li></ul></li><li><span><a href="#Tick-Bars" data-toc-modified-id="Tick-Bars-2" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">2 </span>Tick Bars</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Bonus-Exercise:-Make-OHLC-Bars-from-Custom-Bars" data-toc-modified-id="Bonus-Exercise:-Make-OHLC-Bars-from-Custom-Bars-2.0.1" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">2.0.1 </span>Bonus Exercise: Make OHLC Bars from Custom Bars</a></span></li></ul></li></ul></li><li><span><a href="#Volume-Bars" data-toc-modified-id="Volume-Bars-3" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">3 </span>Volume Bars</a></span></li><li><span><a href="#Dollar-Value-Bars" data-toc-modified-id="Dollar-Value-Bars-4" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">4 </span>Dollar Value Bars</a></span></li><li><span><a href="#Analyzing-the-Bars" data-toc-modified-id="Analyzing-the-Bars-5" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5 </span>Analyzing the 
Bars</a></span><ul class="toc-item"><li><span><a href="#Count-Quantity-of-Bars-By-Each-Bar-Type-(Weekly)" data-toc-modified-id="Count-Quantity-of-Bars-By-Each-Bar-Type-(Weekly)-5.1" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.1 </span>Count Quantity of Bars By Each Bar Type (Weekly)</a></span></li><li><span><a href="#Which-Bar-Type-Has-Most-Stable-Counts?" data-toc-modified-id="Which-Bar-Type-Has-Most-Stable-Counts?-5.2" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.2 </span>Which Bar Type Has Most Stable Counts?</a></span></li><li><span><a href="#Which-Bar-Type-Has-the-Lowest-Serial-Correlation?" data-toc-modified-id="Which-Bar-Type-Has-the-Lowest-Serial-Correlation?-5.3" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.3 </span>Which Bar Type Has the Lowest Serial Correlation?</a></span></li><li><span><a href="#Partition-Bar-Series-into-Monthly,-Compute-Variance-of-Returns,-and-Variance-of-Variance" data-toc-modified-id="Partition-Bar-Series-into-Monthly,-Compute-Variance-of-Returns,-and-Variance-of-Variance-5.4" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.4 </span>Partition Bar Series into Monthly, Compute Variance of Returns, and Variance of Variance</a></span></li><li><span><a href="#Compute-Jarque-Bera-Test,-Which-Has-Lowest-Test-Statistic?" 
data-toc-modified-id="Compute-Jarque-Bera-Test,-Which-Has-Lowest-Test-Statistic?-5.5" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.5 </span>Compute Jarque-Bera Test, Which Has Lowest Test Statistic?</a></span></li><li><span><a href="#Compute-Shapiro-Wilk-Test" data-toc-modified-id="Compute-Shapiro-Wilk-Test-5.6" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">5.6 </span>Compute Shapiro-Wilk Test</a></span></li></ul></li><li><span><a href="#Compare-Serial-Correlation-between-Dollar-and-Dollar-Imbalance-Bars" data-toc-modified-id="Compare-Serial-Correlation-between-Dollar-and-Dollar-Imbalance-Bars-6" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">6 </span>Compare Serial Correlation between Dollar and Dollar Imbalance Bars</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Update-[05.04.18]" data-toc-modified-id="Update-[05.04.18]-6.0.1" data-vivaldi-spatnav-clickable="1"><span class="toc-item-num">6.0.1 </span>Update [05.04.18]</a></span></li></ul></li></ul></li></ul></div>
Advances in Machine Learning
# Chapter 1 - Exploring Tick, Volume, DV Bars
```
%load_ext watermark
%watermark
%load_ext autoreload
%autoreload 2
# import standard libs
from IPython.display import display
from IPython.core.debugger import set_trace as bp
from pathlib import PurePath, Path
import sys
import time
from collections import OrderedDict as od
import re
import os
import json
os.environ['THEANO_FLAGS'] = 'device=cpu,floatX=float32'
# import python scientific stack
import pandas as pd
import pandas_datareader.data as web
pd.set_option('display.max_rows', 100)
from dask import dataframe as dd
from dask.diagnostics import ProgressBar
pbar = ProgressBar()
pbar.register()
import numpy as np
import scipy.stats as stats
import statsmodels.api as sm
from numba import jit
import math
import pymc3 as pm
from theano import shared, theano as tt
# import visual tools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
import seaborn as sns
plt.style.use('seaborn-talk')
plt.style.use('bmh')
#plt.rcParams['font.family'] = 'DejaVu Sans Mono'
#plt.rcParams['font.size'] = 9.5
plt.rcParams['font.weight'] = 'medium'
#plt.rcParams['figure.figsize'] = 10,7
blue, green, red, purple, gold, teal = sns.color_palette('colorblind', 6)
# import util libs
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm, tqdm_notebook
import warnings
warnings.filterwarnings("ignore")
import missingno as msno
from src.utils.utils import *
from src.features.bars import get_imbalance
import src.features.bars as brs
import src.features.snippets as snp
RANDOM_STATE = 777
print()
%watermark -p pandas,pandas_datareader,dask,numpy,pymc3,theano,sklearn,statsmodels,scipy,matplotlib,seaborn,pyarrow,fastparquet
```
## Introduction
This notebook explores the idea of sampling prices as a function of something other than fixed time intervals. For example using the number of ticks, volume or dollar volume traded as the sampling interval. The rest of this notebook works through some of the exercises found in chapters 1 and 2 of the book.
This notebook makes use of the following script found here: `./src/features/bars.py`
## Read and Clean Data
The data set used in this example is too large to be hosted on github. It is a sample of equity tick data, symbol `IVE`, provided by [kibot.com (caution: download link)](http://api.kibot.com/?action=history&symbol=IVE&interval=tickbidask&bp=1&user=guest). Download this data to the `./data/raw/` directory in your local repo.
```
def read_kibot_ticks(fp):
    """Load kibot tick data (format: http://www.kibot.com/support.aspx#data_format).

    # args
        fp: path or buffer of the raw file (Date,Time,Price,Bid,Ask,Size, no header)
    # returns
        pd.DataFrame indexed by tick timestamp with the raw columns plus
        `v` (volume) and `dv` (dollar volume); duplicate rows dropped.
    """
    col_names = ['date', 'time', 'price', 'bid', 'ask', 'size']
    raw = pd.read_csv(fp, header=None, names=col_names)
    # date + time concatenate directly, e.g. '09/28/200909:30:00'
    stamps = pd.to_datetime(raw['date'] + raw['time'], format='%m/%d/%Y%H:%M:%S')
    raw = raw.assign(dates=stamps,
                     v=raw['size'],                   # volume
                     dv=raw['price'] * raw['size'])   # dollar volume
    return (raw.drop(columns=['date', 'time'])
               .set_index('dates')
               .drop_duplicates())
infp = PurePath(data_dir/'raw'/'IVE_tickbidask.txt')
df = read_kibot_ticks(infp)
cprint(df)
```
Save initial processed data as parquet in the `./data/interim/` folder and reload.
```
outfp = PurePath(data_dir/'interim'/'IVE_tickbidask.parq')
df.to_parquet(outfp)
infp=PurePath(data_dir/'interim'/'IVE_tickbidask.parq')
df = pd.read_parquet(infp)
cprint(df)
msno.matrix(df)
```
## Remove Obvious Price Errors in Tick Data
```
sns.boxplot(df.price)
@jit(nopython=True)
def mad_outlier(y, thresh=3.):
    '''
    Flag price outliers via the median absolute deviation (MAD).

    Computes a modified z-score (0.6745 * |y - median| / MAD) and marks
    points whose score exceeds `thresh`.

    # args
        y: assumed to be array with shape (N,1)
        thresh: float(), modified z-score cutoff (default 3.0)
    # returns
        boolean mask of shape (N,) -- True where the point is an outlier.
        NOTE: this is a mask, not "array index of outliers"; use it as
        df.loc[mask] / df.loc[~mask].
    '''
    median = np.median(y)
    # sum of squared deviations over the last axis then sqrt: for the
    # assumed (N,1) input this is simply |y - median| per row
    diff = np.sum((y - median)**2, axis=-1)
    diff = np.sqrt(diff)
    med_abs_deviation = np.median(diff)
    # 0.6745 rescales MAD to be comparable to a normal std deviation
    modified_z_score = 0.6745 * diff / med_abs_deviation
    return modified_z_score > thresh
mad = mad_outlier(df.price.values.reshape(-1,1))
df.loc[mad]
sns.boxplot(df.loc[~mad].price)
```
Drop outliers from dataset and save cleaned data in the `./data/processed/` folder.
```
df = df.loc[~mad]
cprint(df)
outfp = PurePath(data_dir/'processed'/'clean_IVE_fut_prices.parq')
df.to_parquet(outfp)
infp=PurePath(data_dir/'processed'/'clean_IVE_fut_prices.parq')
df = pd.read_parquet(infp)
cprint(df)
```
# Tick Bars
```
def tick_bars(df, price_column, m):
    '''
    compute tick bars: mark every m-th tick as a bar boundary
    # args
        df: pd.DataFrame()
        price_column: str(), name of the price column
        m: int(), threshold value for ticks per bar
    # returns
        idx: list of integer positions at which a bar completes
    '''
    # fixed: docstring previously documented a non-existent `column` arg;
    # the loop bound an unused element variable and had a dead `continue`
    t = df[price_column]
    ts = 0           # ticks accumulated since the last bar
    idx = []
    for i in tqdm(range(len(t))):
        ts += 1
        if ts >= m:
            idx.append(i)
            ts = 0   # reset the accumulator for the next bar
    return idx
def tick_bar_df(df, price_column, m):
    """Return the rows of `df` sampled at tick-bar boundaries, deduplicated."""
    bar_locs = tick_bars(df, price_column, m)
    sampled = df.iloc[bar_locs]
    return sampled.drop_duplicates()
```
There are many ways to choose `M`, or the threshold value for sampling prices. One way is based on ratios of total dollar value/volume traded vs number of ticks. The rest of the notebook uses an arbitrary but sensible `M` value. I leave it as an exercise for the reader to see how the results change based on different values of `M`.
```
n_ticks = df.shape[0]
volume_ratio = (df.v.sum()/n_ticks).round()
dollar_ratio = (df.dv.sum()/n_ticks).round()
print(f'num ticks: {n_ticks:,}')
print(f'volume ratio: {volume_ratio}')
print(f'dollar ratio: {dollar_ratio}')
tick_M = 100 # arbitrary
print(f'tick threshold: {tick_M:,}')
tidx = tick_bars(df, 'price', tick_M)
tidx[:10]
df.iloc[tidx].shape, df.shape
```
Dataset is large so select smaller example for quick exploration
```
tick_df = tick_bar_df(df, 'price', tick_M)
tick_df.shape
def select_sample_data(ref, sub, price_col, date):
    '''
    Select one day of prices from both frames; assumes a DatetimeIndex
    (pandas partial-string indexing does the date slicing).
    # args
        ref: pd.DataFrame containing all ticks
        sub: subordinated pd.DataFrame of sampled prices
        price_col: str(), price column
        date: str(), date to select
    # returns
        (reference prices, subordinated prices) for that date, as pd.Series
    '''
    ref_prices = ref[price_col].loc[date]
    sub_prices = sub[price_col].loc[date]
    return ref_prices, sub_prices
## try different dates to see how the quantity of tick bars changes
xDate ='2009-10-01' #'2017-10-4'
xdf, xtdf = select_sample_data(df, tick_df, 'price', xDate)
xdf.shape, xtdf.shape
def plot_sample_data(ref, sub, bar_type, *args, **kwds):
    """Plot raw prices vs. sampled bars on three stacked, shared axes.

    Top panel overlays both series; the middle shows the raw prices alone;
    the bottom shows only the sampled bars, for visual comparison.

    # args
        ref: pd.Series of all raw prices
        sub: pd.Series of the subordinated (bar-sampled) prices
        bar_type: str(), legend label for the sampled series
        *args/**kwds: forwarded to pandas' .plot()
    """
    f,axes=plt.subplots(3,sharex=True, sharey=True, figsize=(10,7))
    ref.plot(*args, **kwds, ax=axes[0], label='price')
    # 'X' markers with no connecting line so individual samples stand out
    sub.plot(*args, **kwds, ax=axes[0], marker='X', ls='', label=bar_type)
    axes[0].legend();
    ref.plot(*args, **kwds, ax=axes[1], label='price', marker='o')
    sub.plot(*args, **kwds, ax=axes[2], ls='', marker='X',
             color='r', label=bar_type)
    for ax in axes[1:]: ax.legend()
    plt.tight_layout()
    return
plot_sample_data(xdf, xtdf, 'tick bar', alpha=0.5, markersize=7)
```
### Bonus Exercise: Make OHLC Bars from Custom Bars
Extract `tick_df.price` and `df.price` into two pandas series.
```
sub = tick_df.price
ref = df.price
```
The function below creates the OHLC dataframe by:
1. Iterating over the subordinated series' index extracting idx and idx+1 period
2. Selecting the same date period from the reference series
3. Extracting the max, min prices from the reference series.
4. Combining the o,h,l,c and start and end timestamps into a row
5. Returning the aggregated rows as a pandas dataframe.
```
def get_ohlc(ref, sub):
    '''
    fn: get ohlc from custom bars

    For each consecutive pair of bar timestamps in `sub`, the open and close
    come from `sub` itself and the high/low from all raw ticks of `ref`
    falling between the two timestamps (label-based inclusive slice).

    # args
        ref : reference pandas series with all prices
        sub : custom tick pandas series (bar-sampled prices)
    # returns
        tick_df : dataframe with columns ['end','start','open','high','low','close']
    '''
    ohlc = []
    for i in tqdm(range(sub.index.shape[0]-1)):
        start,end = sub.index[i], sub.index[i+1]
        # every raw tick inside this bar's time span
        tmp_ref = ref.loc[start:end]
        max_px, min_px = tmp_ref.max(), tmp_ref.min()
        o,h,l,c = sub.iloc[i], max_px, min_px, sub.iloc[i+1]
        # NOTE: tuple is (end, start, ...) on purpose -- matches `cols` below
        ohlc.append((end,start,o,h,l,c))
    cols = ['end','start','open','high','low','close']
    return (pd.DataFrame(ohlc,columns=cols))
## uncomment below to run (takes about 5-6 mins on my machine)
#tick_bars_ohlc = get_ohlc(ref, sub)
#cprint(tick_bars_ohlc)
#outfp = PurePath(data_dir/'processed'/'tick_bars_ohlc.parq')
#tick_bars_ohlc.to_parquet(outfp)
```
# Volume Bars
```
def volume_bars(df, volume_column, m):
    '''
    compute volume bars: close a bar whenever cumulative volume reaches m
    # args
        df: pd.DataFrame()
        volume_column: name for volume data
        m: int(), threshold value for volume
    # returns
        idx: list of integer positions at which a bar completes
    '''
    vol = df[volume_column]
    running = 0      # volume accumulated since the last bar
    bar_locs = []
    for pos, traded in enumerate(tqdm(vol)):
        running += traded
        if running < m:
            continue
        bar_locs.append(pos)
        running = 0  # start accumulating the next bar
    return bar_locs
def volume_bar_df(df, volume_column, m):
    """Return the rows of `df` sampled at volume-bar boundaries, deduplicated."""
    bar_locs = volume_bars(df, volume_column, m)
    return df.iloc[bar_locs].drop_duplicates()
volume_M = 10_000 # arbitrary
print(f'volume threshold: {volume_M:,}')
# BUG FIX: volume_bar_df(df, volume_column, m) takes three arguments; the
# original call passed an extra 'price' positional, raising a TypeError.
v_bar_df = volume_bar_df(df, 'v', volume_M)
cprint(v_bar_df)
xDate = '2009-10-1'
xdf, xtdf = select_sample_data(df, v_bar_df, 'price', xDate)
print(f'xdf shape: {xdf.shape}, xtdf shape: {xtdf.shape}')
plot_sample_data(xdf, xtdf, 'volume bar', alpha=0.5, markersize=7)
```
# Dollar Value Bars
```
def dollar_bars(df, dv_column, m):
    '''
    compute dollar bars: close a bar whenever cumulative dollar volume reaches m
    # args
        df: pd.DataFrame()
        dv_column: name for dollar volume data
        m: int(), threshold value for dollars
    # returns
        idx: list of integer positions at which a bar completes
    '''
    # BUG FIX: the original read df[column], but no `column` exists in this
    # scope (the parameter is `dv_column`), so the function raised NameError.
    t = df[dv_column]
    ts = 0           # dollar volume accumulated since the last bar
    idx = []
    for i, x in enumerate(tqdm(t)):
        ts += x
        if ts >= m:
            idx.append(i)
            ts = 0   # reset the accumulator for the next bar
            continue
    return idx
def dollar_bar_df(df, dv_column, m):
    """Return the rows of `df` sampled at dollar-bar boundaries, deduplicated."""
    bar_locs = dollar_bars(df, dv_column, m)
    return df.iloc[bar_locs].drop_duplicates()
dollar_M = 1_000_000 # arbitrary
print(f'dollar threshold: {dollar_M:,}')
# BUG FIX: dollar_bar_df(df, dv_column, m) takes three arguments; the
# original call passed an extra 'price' positional, raising a TypeError.
dv_bar_df = dollar_bar_df(df, 'dv', dollar_M)
cprint(dv_bar_df)
xDate = '2009-10-1'
xdf, xtdf = select_sample_data(df, dv_bar_df, 'price', xDate)
print(f'xdf shape: {xdf.shape}, xtdf shape: {xtdf.shape}')
plot_sample_data(xdf, xtdf, 'dollar bar', alpha=0.5, markersize=7)
```
# Analyzing the Bars
## Count Quantity of Bars By Each Bar Type (Weekly)
```
def count_bars(df, price_col='price'):
    """Count the number of bars per calendar week.

    BUG FIX: pd.TimeGrouper was deprecated and then removed from pandas;
    pd.Grouper(freq=...) is the drop-in replacement with identical behavior.
    """
    return df.groupby(pd.Grouper(freq='1W'))[price_col].count()
def scale(s):
    """Min-max scale a series onto [0, 1] so counts are comparable."""
    lo, hi = s.min(), s.max()
    return (s - lo) / (hi - lo)
# count series
# scale to compare 'apples to apples'
tc = scale(count_bars(tick_df))
vc = scale(count_bars(v_bar_df))
dc = scale(count_bars(dv_bar_df))
dfc = scale(count_bars(df))
# plot time series of count
f,ax=plt.subplots(figsize=(10,7))
tc.plot(ax=ax, ls='-', label='tick count')
vc.plot(ax=ax, ls='--', label='volume count')
dc.plot(ax=ax, ls='-.', label='dollar count')
ax.set_title('scaled bar counts')
ax.legend()
```
## Which Bar Type Has Most Stable Counts?
```
print(f'tc std: {tc.std():.2%}, vc std: {vc.std():.2%}, dc std: {dc.std():.2%}')
bar_types = ['tick','volume','dollar','df']
bar_std = [tc.std(),vc.std(),dc.std(),dfc.std()]
counts = (pd.Series(bar_std,index=bar_types))
counts.sort_values()
```
## Which Bar Type Has the Lowest Serial Correlation?
```
def returns(s):
    """Log returns of a price series, indexed by the later timestamp."""
    log_px = np.log(np.asarray(s))
    return (pd.Series(log_px[1:] - log_px[:-1], index=s.index[1:]))
tr = returns(tick_df.price)
vr = returns(v_bar_df.price)
dr = returns(dv_bar_df.price)
df_ret = returns(df.price)
bar_returns = [tr, vr, dr, df_ret]
def get_test_stats(bar_types, bar_returns, test_func, *args, **kwds):
    """Apply `test_func` to each return series and tabulate the results.

    Returns a DataFrame indexed by bar type with columns
    'sample_size' and '<test_func name>_stat'.
    """
    stat_name = f'{test_func.__name__}_stat'
    rows = {}
    for bar, bar_ret in zip(bar_types, bar_returns):
        rows[bar] = (int(bar_ret.shape[0]), test_func(bar_ret, *args, **kwds))
    out = pd.DataFrame.from_dict(rows)
    out = out.rename(index={0: 'sample_size', 1: stat_name})
    return out.T
autocorrs = get_test_stats(bar_types,bar_returns,pd.Series.autocorr)
display(autocorrs.sort_values('autocorr_stat'),
autocorrs.abs().sort_values('autocorr_stat'))
def plot_autocorr(bar_types,bar_returns):
    """Plot the ACF (120 lags, 95% CI, FFT-based) of each bar type's returns,
    one stacked subplot per series.

    # args
        bar_types: list of str labels, one per series
        bar_returns: list of pd.Series of returns
    """
    f,axes=plt.subplots(len(bar_types),figsize=(10,7))
    for i, (bar, typ) in enumerate(zip(bar_returns, bar_types)):
        # NOTE(review): `unbiased` was renamed `adjusted` in newer
        # statsmodels -- confirm against the installed version
        sm.graphics.tsa.plot_acf(bar, lags=120, ax=axes[i],
                                 alpha=0.05, unbiased=True, fft=True,
                                 zero=False,
                                 title=f'{typ} AutoCorr')
    plt.tight_layout()
def plot_hist(bar_types, bar_rets):
    """Plot a log-scaled histogram of each bar type's returns, one subplot each.

    # args
        bar_types: list of str labels, one per series
        bar_rets: list of pd.Series of returns
    """
    f, axes = plt.subplots(len(bar_types), figsize=(10, 6))
    # BUG FIX: the loop previously iterated the *global* `bar_returns`
    # instead of the `bar_rets` parameter, silently ignoring the argument.
    for i, (bar, typ) in enumerate(zip(bar_rets, bar_types)):
        g = sns.distplot(bar, ax=axes[i], kde=False, label=typ)
        g.set(yscale='log')
        axes[i].legend()
    plt.tight_layout()
plot_autocorr(bar_types,bar_returns)
plot_hist(bar_types,bar_returns)
```
## Partition Bar Series into Monthly, Compute Variance of Returns, and Variance of Variance
```
def partition_monthly(s):
    """Variance of `s` within each calendar month (assumes a DatetimeIndex)."""
    monthly = s.resample('1M')
    return monthly.var()
tr_rs = partition_monthly(tr)
vr_rs = partition_monthly(vr)
dr_rs = partition_monthly(dr)
df_ret_rs = partition_monthly(df_ret)
monthly_vars = [tr_rs, vr_rs, dr_rs, df_ret_rs]
get_test_stats(bar_types,monthly_vars,np.var).sort_values('var_stat')
```
## Compute Jarque-Bera Test, Which Has Lowest Test Statistic?
```
def jb(x, test=True):
    """Jarque-Bera normality test: return the statistic (test=True) or p-value."""
    np.random.seed(12345678)
    result = stats.jarque_bera(x)
    return result[0] if test else result[1]
get_test_stats(bar_types,bar_returns,jb).sort_values('jb_stat')
```
## Compute Shapiro-Wilk Test
For the Shapiro-Wilk test statistic, larger is better.
```
def shapiro(x, test=True):
    """Shapiro-Wilk normality test: return the statistic (test=True) or p-value."""
    np.random.seed(12345678)
    result = stats.shapiro(x)
    return result[0] if test else result[1]
(get_test_stats(bar_types,bar_returns,shapiro)
.sort_values('shapiro_stat')[::-1])
```
# Compare Serial Correlation between Dollar and Dollar Imbalance Bars
### Update [05.04.18]
Earlier version was missing some additional code. Before we can compare we must compute the Dollar Imbalance Bar. This is my initial implementation of this concept but is experimental and may need some adjustments.
1. Compute the sequence ${bt}_{t=1,...,T}$.
2. Compute the imbalance at time $T$ defined as $\theta_T = \sum_{t=1}^{T}b_tv_t$.
3. Compute the expected value of $T$ as ewma of previous $T$ values.
4. Compute the expected value of $\theta_T$ as ewma of $b_tv_t$ values.
5. for each index:
- compute $\lvert\theta_t\rvert >= E_0[T] * \lvert2v^+-E_0[v_t]\rvert$
- if the condition is met capture the quantity of ticks
- reset tick count
- continue
```
tidx = get_imbalance(df.price.values)*df.dv.iloc[1:]
cprint(tidx)
wndo = tidx.shape[0]//1000
print(f'window size: {wndo:,.2f}')
## Expected value of bs approximated by ewm
E_bs = tidx.ewm(wndo).mean() # expected `bs`
## what is E_T???
## in this implementation E_T is ewm of index values
E_T = pd.Series(range(tidx.shape[0]), index=tidx.index).ewm(wndo).mean()
df0 =(pd.DataFrame().assign(bs=tidx)
.assign(E_T=E_T).assign(E_bs=E_bs)
.assign(absMul=lambda df: df.E_T*np.abs(df.E_bs))
.assign(absTheta=tidx.cumsum().abs()))
cprint(df0)
df0[['E_T','E_bs']].plot(subplots=True, figsize=(10,6));
display(df0.describe()/1000)
(df0.loc['2010-06',['absMul','absTheta']]
.reset_index(drop=True)
.plot(figsize=(10,5)))
def test_t_abs(absTheta,t,E_bs):
"""
Bool function to test inequality
*row is assumed to come from df.itertuples()
-absTheta: float(), row.absTheta
-t: pd.Timestamp()
-E_bs: float(), row.E_bs
"""
return (absTheta >= t*E_bs)
def agg_imbalance_bars(df):
    """
    Implements the accumulation logic

    Walks the rows in order; whenever the running |theta| exceeds the
    expected-count * expected-imbalance threshold (test_t_abs) a bar
    (start label, stop label, tick count) is emitted and the window resets.

    df : DataFrame with columns absTheta and E_bs (see df0 above)
    returns : list of (start, stop, Ts) tuples
    """
    start = df.index[0]
    bars = []
    for row in df.itertuples():
        t_abs = row.absTheta
        rowIdx = row.Index
        E_bs = row.E_bs
        # NOTE(review): this label slice is O(n) per row, making the whole
        # loop O(n^2). An incremental counter would be linear, but behaves
        # differently if the (timestamp) index contains duplicate labels --
        # confirm before changing.
        t = df.loc[start:rowIdx].shape[0]
        if t<1: t=1 # if t lt 1 set equal to 1
        if test_t_abs(t_abs,t,E_bs):
            bars.append((start,rowIdx,t))
            start = rowIdx
    return bars
# Build the dollar-imbalance bars from the working frame.
bars = agg_imbalance_bars(df0)
test_imb_bars = (pd.DataFrame(bars,columns=['start','stop','Ts'])
                 .drop_duplicates())
cprint(test_imb_bars)
test_imb_bars.Ts.describe().round()
test_imb_bars.set_index('stop')['Ts'].plot()
# Sample the price at each bar's closing tick.
dvImbBars = df.price.loc[test_imb_bars.stop].drop_duplicates()
cprint(dvImbBars)
dvBar = dv_bar_df.price
cprint(dvBar)
dr = returns(dv_bar_df.price)
drImb = returns(dvImbBars)
bar_types = ['dvBar','dvImb']
bar_rets = [dr, drImb]
# BUG FIX: the statistics/plots below previously used `bar_returns`, the
# list from the earlier dollar/tick/volume comparison, whose length does not
# match the two bar types defined here; use `bar_rets` consistently.
get_test_stats(bar_types,bar_rets,pd.Series.autocorr)
plot_autocorr(bar_types,bar_rets)
plot_hist(bar_types,bar_rets)
jbs = get_test_stats(bar_types,bar_rets,jb).sort_values('jb_stat')
shaps = (get_test_stats(bar_types,bar_rets,shapiro)
         .sort_values('shapiro_stat')[::-1])
display(jbs,shaps)
```
| github_jupyter |
# Feature Engineering
Author : [Alexandre Gramfort](http://alexandre.gramfort.net)
with some code snippets from [Olivier Grisel](http://ogrisel.com/) (leaf encoder)
It is the most creative aspect of Data Science!
We will use here the Titanic dataset.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Load the classic Titanic dataset bundled with seaborn.
df = sns.load_dataset("titanic")
df.head()
```
Let's look at the dtypes of the different columns. You will observe that it contains columns that
are explicitly marked as `category`.
```
df.info()
```
This allows you to do things like:
```
from sklearn.compose import make_column_selector
make_column_selector(dtype_include='category')(df)
```
in order to get quickly the names of the columns to treat as categorical.
As you can see the data contains both quantitative and categorical variables. These categorical have some predictive power:
```
sns.catplot(data=df, x='pclass', y='survived', hue='sex', kind='bar')
```
The question is how to feed these non-quantitative features to a supervised learning model?
## Categorical features
- Nearly always need some treatment
- High cardinality can create very sparse data
- Difficult to impute missing
### One-Hot encoding
**Idea:** Each category is coded as a 0 or 1 in a dedicated column.
- It is the most basic method. It is used with most linear algorithms
- Drop first column to avoid collinearity
- It uses sparse format which is memory-friendly
- Most current implementations don’t gracefully treat missing, unseen variables
Example with the `embarked` column. We have here 3 categories:
```
df['embarked'].value_counts()
df1 = df[['embarked']]
df1.head(10)
```
Let's use a [scikit-learn OneHotEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html)
```
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
ohe.fit_transform(df1.head(10)).toarray()
```
To know which column corresponds to what you can look at:
```
ohe.categories_
```
Basically the first column will be a 1 if category was 'C', etc.
Now if we have missing values:
```
ohe = OneHotEncoder()
ohe.fit_transform(df1).toarray()
```
We have now 4 columns, one corresponding to NaNs:
```
ohe.categories_
```
As the columns are linearly dependant after one-hot encoding you can drop one column with:
```
OneHotEncoder(drop='first').fit_transform(df1.head(10)).toarray()
```
This avoids colinearity, which for example leads to slower optimization solvers.
# Ordinal encoding
**Idea:** Each category is coded with a different integer. The order being arbitrary.
- Give every categorical variable a unique numerical ID
- Useful for non-linear tree-based algorithms (forests, gradient-boosting)
- Does not increase dimensionality
```
from sklearn.preprocessing import OrdinalEncoder
oe = OrdinalEncoder()
oe.fit_transform(df1.head(10))
oe.categories_
```
This means that 'C' will be coded as 0, 'Q' as a 1 and 'S' as a 2.
## Count encoding
**Idea:** Replace categorical variables with their count in the train set
- Useful for both linear and non-linear algorithms
- Can be sensitive to outliers
- May add log-transform, works well with counts
- Replace unseen variables with `1`
- May give collisions: same encoding, different variables
You'll need to install the `category_encoders` package with:
pip install category_encoders
```
import category_encoders as ce
ce.__version__
df1.head(10)
ce.CountEncoder().fit_transform(df1.head(10)).values
```
'S' is replaced by 7 as it appears 7 times in the fitted data, etc.
## Label / Ordinal count encoding
**Idea:** Rank categorical variables by count and use this rank as the encoding value. It is an ordinal encoding where the value is taken from the frequency of each category.
- Useful for both linear and non-linear algorithms
- Not sensitive to outliers
- Won’t give same encoding to different variables
- Best of both worlds
As it is not available in any package we will implement this ourselves:
```
from sklearn.preprocessing import OrdinalEncoder
class CountOrdinalEncoder(OrdinalEncoder):
    """Encode categorical features as an integer array
    using count information.

    Same as OrdinalEncoder, except the integer assigned to each category
    is its frequency rank in the fitted data: the rarest category gets 0
    and the most frequent the largest code.
    """
    def __init__(self, categories='auto', dtype=np.float64):
        self.categories = categories
        self.dtype = dtype
    def fit(self, X, y=None):
        """Fit the OrdinalEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to determine the categories of each feature.
        Returns
        -------
        self
        """
        # Configured here so unseen categories at transform time map to NaN
        # instead of raising.
        self.handle_unknown = 'use_encoded_value'
        self.unknown_value = np.nan
        super().fit(X)
        # NOTE(review): _check_X is a private scikit-learn helper and may
        # change across versions -- confirm against the pinned sklearn.
        X_list, _, _ = self._check_X(X)
        # now we'll reorder by counts
        for k, cat in enumerate(self.categories_):
            counts = []
            for c in cat:
                counts.append(np.sum(X_list[k] == c))
            # argsort is ascending, so categories end up ordered
            # rarest -> most frequent; their position becomes the code.
            order = np.argsort(counts)
            self.categories_[k] = cat[order]
        return self
coe = CountOrdinalEncoder()
coe.fit_transform(pd.DataFrame(df1.head(10)))
```
'S' is replaced by 2 as it's the most frequent, then 'C' is 1 and 'Q' is 0.
This encoding is robust to collision which can happen with the CountEncoder when certain categories happen the same number of times. Example:
```
coe.fit_transform(pd.DataFrame(['es', 'fr', 'fr', 'en', 'en', 'es']))
```
vs.
```
ce.CountEncoder().fit_transform(pd.DataFrame(['es', 'fr', 'fr', 'en', 'en', 'es']))
```
# Hash encoding
**Idea:** Does “OneHot-encoding” with arrays of a fixed length.
- Avoids extremely sparse data
- May introduce collisions
- Can repeat with different hash functions and bag result for small bump in accuracy
- Collisions usually degrade results, but may improve it.
- Gracefully deals with new variables (eg: new user-agents)
```
df1.head(10)
ce.hashing.HashingEncoder(n_components=4).fit_transform(df1.head(10).values)
```
## Target encoding
Encode categorical variables by their ratio of target (binary classification or regression)
Formula reads:
$$
TE(X) = \alpha(n(X)) E[ y | x=X ] + (1 - \alpha(n(X))) E[y]
$$
where $n(X)$ is the count of category $X$ and $\alpha$ is a monotonically increasing function bounded between 0 and 1 [1].
- Add smoothing to avoid setting variable encodings to 0.
```
[1] Micci-Barreca, 2001: A preprocessing scheme for
high-cardinality categorical attributes in classification
and prediction problems.
```
You will need the [dirty cat](https://pypi.org/project/dirty-cat/) package. You can install it with:
pip install dirty_cat
```
import dirty_cat as dc # install with: pip install dirty_cat
X = np.array(['A', 'B', 'C', 'A', 'B', 'B'])[:, np.newaxis]
y = np.array([1 , 1 , 1 , 0 , 0 , 1])
dc.TargetEncoder(clf_type='binary-clf').fit_transform(X, y)
# If \alpha was 1 you would get: [0.5, 0.66, 1, 0.5, 0.66, 0.66]
```
## NaN encoding
It is quite frequent in real life that the fact one variable is missing
has some predictive power. For example in the Titanic dataset the 'deck'
parameter is very often missing and it is missing often for passengers who
did not have a proper cabin and who were therefore most likely to die.
To inform your supervised model you can explicitly encode the missingness
with a dedicated column.
You can do this with a [SimpleImputer](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)
```
from sklearn.impute import SimpleImputer
X = np.array([0, 1., np.nan, 2., 0.])[:, None]
SimpleImputer(strategy='median', add_indicator=True).fit_transform(X)
```
or [MissingIndicator](https://scikit-learn.org/stable/modules/generated/sklearn.impute.MissingIndicator.html)
```
from sklearn.impute import MissingIndicator
X = np.array([0, 1., np.nan, 2., 0.])[:, None]
MissingIndicator().fit_transform(X)
```
## Polynomial encoding
**Idea:** Encode interactions between categorical variables
- Linear algorithms without interactions can not solve the XOR problem
- A polynomial kernel *can* solve XOR
```
X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
X
from sklearn.preprocessing import PolynomialFeatures
PolynomialFeatures(include_bias=False, interaction_only=True).fit_transform(X)
```
## To go beyond
You can also use some form of embedding eg using a Neural Network to create dense embeddings from categorical variables.
- Map categorical variables in a function approximation problem into Euclidean spaces
- Faster model training.
- Less memory overhead.
- Can give better accuracy than 1-hot encoded.
- See for example https://arxiv.org/abs/1604.06737
# Binning
See https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization_classification.html
[KBinsDiscretizer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html) allows you to estimate non-linear model in the original feature space while only using a linear logistic regression.
See this [example in regression](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization.html).
What it does:
```
from sklearn.preprocessing import KBinsDiscretizer
rng = np.random.RandomState(42)
X = rng.randn(10, 2)
X
KBinsDiscretizer(n_bins=2).fit_transform(X).toarray()
```
# Scaling
Scale numerical variables into a certain range
- Standard (Z) Scaling
- MinMax Scaling
- Root scaling
- Log scaling
```
from sklearn.preprocessing import StandardScaler, MinMaxScaler
rng = np.random.RandomState(42)
X = 10 + rng.randn(10, 1)
X
StandardScaler().fit_transform(X)
MinMaxScaler().fit_transform(X)
from sklearn.preprocessing import FunctionTransformer
X = np.arange(1, 10)[:, np.newaxis]
FunctionTransformer(func=np.log).fit_transform(X)
```
# Leaf coding
The following is an implementation of a trick found in:
Practical Lessons from Predicting Clicks on Ads at Facebook
Junfeng Pan, He Xinran, Ou Jin, Tianbing XU, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers, Joaquin Quiñonero Candela
International Workshop on Data Mining for Online Advertising (ADKDD)
https://research.fb.com/wp-content/uploads/2016/11/practical-lessons-from-predicting-clicks-on-ads-at-facebook.pdf
```
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import LabelBinarizer
from scipy.sparse import hstack
class TreeTransform(BaseEstimator, TransformerMixin):
    """One-hot encode samples with an ensemble of trees

    This transformer first fits an ensemble of trees (e.g. gradient
    boosted trees or a random forest) on the training set.

    Then each leaf of each tree in the ensembles is assigned a fixed
    arbitrary feature index in a new feature space. If you have 100
    trees in the ensemble and 2**3 leafs per tree, the new feature
    space has 100 * 2**3 == 800 dimensions.

    Each sample of the training set goes through the decisions of each tree
    of the ensemble and ends up in one leaf per tree. The sample is encoded
    by setting features with those leafs to 1 and leaving the other feature
    values at 0.

    The resulting transformer learns a supervised, sparse, high-dimensional
    categorical embedding of the data.

    This transformer is typically meant to be pipelined with a linear model
    such as logistic regression, linear support vector machines or
    elastic net regression.
    """
    def __init__(self, estimator):
        # Unfitted prototype; a clone is fitted in fit_transform.
        self.estimator = estimator
    def fit(self, X, y):
        self.fit_transform(X, y)
        return self
    def fit_transform(self, X, y):
        # Fit a clone so the caller's estimator instance is left untouched.
        self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X, y)
        self.binarizers_ = []
        sparse_applications = []
        estimators = np.asarray(self.estimator_.estimators_).ravel()
        for t in estimators:
            lb = LabelBinarizer(sparse_output=True)
            # NOTE(review): tree_.apply is a low-level sklearn API that
            # expects float32 input; it returns each sample's leaf index.
            X_leafs = t.tree_.apply(X.astype(np.float32))
            sparse_applications.append(lb.fit_transform(X_leafs))
            self.binarizers_.append(lb)
        # One one-hot block per tree, concatenated along the feature axis.
        return hstack(sparse_applications)
    def transform(self, X, y=None):
        sparse_applications = []
        estimators = np.asarray(self.estimator_.estimators_).ravel()
        for t, lb in zip(estimators, self.binarizers_):
            X_leafs = t.tree_.apply(X.astype(np.float32))
            sparse_applications.append(lb.transform(X_leafs))
        return hstack(sparse_applications)
# Small ensemble: 10 boosted trees with at most 5 leaves each,
# so the embedding has at most 10 * 5 = 50 new features.
boosted_trees = GradientBoostingClassifier(
    max_leaf_nodes=5, learning_rate=0.1,
    n_estimators=10, random_state=0,
)
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
TreeTransform(boosted_trees).fit_transform(X, y)
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
Limiting yourself to LogisticRegression propose features to predict survival.
</li>
</ul>
</div>
```
from sklearn.linear_model import LogisticRegression
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
# Target and features; 'alive' duplicates 'survived', so drop both from X.
y = df.survived.values
X = df.drop(['survived', 'alive'], axis=1)
X.head()
lr = LogisticRegression(solver='lbfgs')
# Baseline: impute + scale the three numeric columns only.
# NOTE(review): SimpleImputer and StandardScaler come from imports made in
# earlier notebook cells.
ct = make_column_transformer(
    (make_pipeline(SimpleImputer(), StandardScaler()), ['age', 'pclass', 'fare'])
)
clf = make_pipeline(ct, lr)
# 10-fold cross-validated accuracy of the baseline pipeline.
np.mean(cross_val_score(clf, X, y, cv=10))
```
### Now do better !
| github_jupyter |
```
%matplotlib inline
```
# Feature transformations with ensembles of trees
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
```
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(
    X_train, y_train, test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
                          random_state=0)
rt_lm = LogisticRegression(max_iter=1000)
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression(max_iter=1000)
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
# The LR is fitted on the held-out half so the leaf encoding is not
# evaluated on the data the trees were grown on.
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
# Supervised transformation based on gradient boosted trees
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression(max_iter=1000)
grd.fit(X_train, y_train)
# GradientBoosting apply() returns [n_samples, n_estimators, n_classes];
# slice out the single class axis before one-hot encoding.
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
# Zoomed view of the top-left corner of the ROC plane.
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
```
| github_jupyter |
# Using AWS Lambda and PyWren for Landsat 8 Time Series
This notebook is a simple demonstration of drilling a timeseries of [NDVI](https://en.wikipedia.org/wiki/Normalized_difference_vegetation_index) values from the [Landsat 8 satellite images held on AWS](https://landsatonaws.com/). You can view these [time series of satellite images interactively](https://search.remotepixel.ca/#7.43/41.788/-88.447) as well, if you would like to see what they look like.
The code relies on the l8_ndvi_point function from the [remotepixel-api](https://github.com/RemotePixel/remotepixel-api) to compute an NDVI value for a given set of coordinates. It's recommended that you install this yourself, but currently a version of the API is accepting requests at the URL in the code below, and we will use this API (which is an AWS Lambda function) for the sake of this demo. It works by sending a request to an endpoint with a sceneID and a location like this:
[https://w5xm4e5886.execute-api.us-west-2.amazonaws.com/production/l8_ndvi_point?coords=-87.596890,41.7856533&scene=LC08_L1TP_023031_20191007_20191018_01_T1](https://w5xm4e5886.execute-api.us-west-2.amazonaws.com/production/l8_ndvi_point?coords=-87.596890,41.7856533&scene=LC08_L1TP_023031_20191007_20191018_01_T1)
This will return:
`{"ndvi": 0.21664535999298096, "date": "2019-10-07", "cloud": 0.07}`
If you haven't already, please [install/configure PyWren](http://pywren.io/pages/gettingstarted.html) in order to run this notebook. We will be using [PyWren](https://github.com/pywren/pywren) to call the Remote Pixel API in parallel on satellite images that were taken over the past seven years, calculating a single NDVI value for each image. The satellite images themselves are held in a public S3 bucket. Thus, we are taking advantage of two levels of serverless parallelism (see workflow below): one for the API calls and one for the calculations themselves.
Once we have the results back as a list of dictionaries, drawn from a timeseries of more than 100 images, we can simply plot the resulting timeseries or do further analysis. BUT, the points may well be cloud or cloud shadow contaminated. We haven’t done any cloud masking to the imagery, but we do have the scene metadata on the probable amount of cloud across the entire scene. We use this to weight a [smoothing spline](https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.interpolate.UnivariateSpline.html), such that an observation with no reported cloud over the scene has full weight, and an observation with a reported 100% of the scene with cloud has zero weight.
<img src="pywren_workflow.png" width="800">
Original Code Credit: Peter Scarth (Joint Remote Sensing Research Program)
```
import requests, json, numpy, datetime
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
import pywren
# Function to return a Landsat 8 scene list given a Longitude,Latitude string
# This uses the amazing developmentseed Satellite API
# https://github.com/sat-utils/sat-api
def getSceneList(lonLat):
    """Return Landsat-8 scene IDs covering a 'lon,lat' point string.

    Queries the developmentseed Satellite API
    (https://github.com/sat-utils/sat-api) and normalises the trailing
    two-character suffix of each scene ID.
    """
    url = "https://api.developmentseed.org/satellites/landsat"
    params = dict(
        contains=lonLat,
        satellite_name="landsat-8",
        limit="1000")
    # Call the API to grab the scene metadata
    sceneMetaData = json.loads(requests.get(url=url, params=params).content)
    # Suffix remapping applied to every scene: '01'/'02' -> '00', '03' -> '02'.
    suffix_remap = {'01': '00', '02': '00', '03': '02'}
    scenes = []
    for record in sceneMetaData["results"]:
        scene = str(record['aws_index'].split('/')[-2])
        tail = scene[-2:]
        if tail in suffix_remap:
            scene = scene[:-2] + suffix_remap[tail]
        scenes.append(scene)
    return scenes
# Function to call a AWS Lambda function to drill a single pixel and compute the NDVI
def getNDVI(scene):
    """Drill one pixel through the remotepixel AWS Lambda endpoint.

    Returns the decoded JSON record (dict with 'ndvi', 'date', 'cloud').
    Reads the module-level ``lonLat`` coordinate string.
    """
    url = "https://w5xm4e5886.execute-api.us-west-2.amazonaws.com/production/l8_ndvi_point"
    # Call the API and return the JSON results
    response = requests.get(url=url, params=dict(coords=lonLat, scene=scene))
    return json.loads(response.text)
```
Let's compute the NDVI time series for the home of the MACSS program, [1155 E. 60th Street, Chicago, IL](https://www.google.com/maps/place/41%C2%B047'08.4%22N+87%C2%B035'48.8%22W).
```
%%time
# 1155 E. 60th Street, Chicago, IL (Home of MACSS Program)
lonLat = '-87.596890,41.7856533'
# Call the api to retrieve the scenes available under the point of interest
scenes = getSceneList(lonLat)
# Set up a pywren executor and map the NDVI retrieval across all the available scenes
pwex = pywren.default_executor()
timeSeries = pywren.get_all_results(pwex.map(getNDVI, scenes))
# Extract the data from the list of results
timeStamps = [datetime.datetime.strptime(obs['date'],'%Y-%m-%d') for obs in timeSeries if 'date' in obs]
ndviSeries = [obs['ndvi'] for obs in timeSeries if 'ndvi' in obs]
cloudSeries = [obs['cloud']/100 for obs in timeSeries if 'cloud' in obs]
# Create a time variable as the x axis to fit the observations
# First we convert to seconds
timeSecs = numpy.array([(obsTime-datetime.datetime(1970,1,1)).total_seconds() for obsTime in timeStamps])
# And then normalise from 0 to 1 to avoid any numerical issues in the fitting
fitTime = ((timeSecs-numpy.min(timeSecs))/(numpy.max(timeSecs)-numpy.min(timeSecs)))
# Smooth the data by fitting a spline weighted by cloud amount
# (weight goes to 0 as the reported scene cloud fraction goes to 1).
smoothedNDVI=UnivariateSpline(
    fitTime[numpy.argsort(fitTime)],
    numpy.array(ndviSeries)[numpy.argsort(fitTime)],
    w=(1.0-numpy.array(cloudSeries)[numpy.argsort(fitTime)])**2.0,
    k=2,
    s=0.1)(fitTime)
# Setup the figure and plot the data, fit and cloud amount
fig = plt.figure(figsize=(16,10))
plt.plot(timeStamps,ndviSeries, 'gx',label='Raw NDVI Data')
plt.plot(timeStamps,ndviSeries, 'g:', linewidth=1)
plt.plot(timeStamps,cloudSeries, 'b.', linewidth=1,label='Scene Cloud Percent')
plt.plot(timeStamps,smoothedNDVI, 'r--', linewidth=3,label='Cloudfree Weighted Spline')
plt.xlabel('Date', fontsize=16)
plt.ylabel('NDVI', fontsize=16)
plt.title('AWS Lambda Landsat 8 NDVI Drill', fontsize=20)
plt.grid(True)
plt.ylim([-.1,1.0])
plt.legend(fontsize=14)
plt.show()
#plt.savefig('lambdaNDVI.png', bbox_inches='tight')
```
| github_jupyter |
```
import ROOT
import ostap.fixes.fixes
from ostap.core.core import cpp, Ostap
from ostap.core.core import pwd, cwd, ROOTCWD
from ostap.core.core import rootID, funcID, funID, fID, histoID, hID, dsID
from ostap.core.core import VE
from ostap.histos.histos import h1_axis, h2_axes, h3_axes
from ostap.histos.graphs import makeGraph, hToGraph, hToGraph2, hToGraph3, lw_graph
import ostap.trees.trees
import ostap.trees.cuts
import ostap.histos.param
import ostap.histos.compare
import ostap.io.root_file
import ostap.math.models
import ostap.fitting.roofit
import ostap.fitting.models as Models
canv = ROOT.TCanvas("canv","canv",900,450)
import numpy as np
from scipy.fftpack import fft, ifft
from scipy.fftpack import rfft, irfft
from scipy.stats import norm, chi2, beta
from math import pow, sqrt, atan, pi
N_EVENTS = 10
N_CHANNELS = 2692
# Events whose largest deviation from the waveform mean exceeds this
# threshold are excluded from the analysis.
BAN_LEVEL = 65
# First pass over the dump: build the ban list of noisy/spiky events.
dumpfile = open("dump.txt","r")
max_diff = []
down_diff = []
ban_list = []
ev_num = 0
for line in dumpfile:
    value_str_list = (line[:-1]).split(",")
    value_list = []
    for idx in range(N_CHANNELS):
        value_list.append( float( value_str_list[idx] ) )
    # Largest excursion below or above the event mean.
    max_level = max( sum(value_list)/len(value_list) - min(value_list) ,
    max(value_list) - sum(value_list)/len(value_list) )
    max_diff .append ( max_level )
    if sum(value_list)/len(value_list) - min(value_list) > max(value_list) - sum(value_list)/len(value_list) :
        down_diff.append ( sum(value_list)/len(value_list) - min(value_list) )
    if max_level > BAN_LEVEL:
        ban_list.append( ev_num )
    ev_num = ev_num + 1
dumpfile.close()
# Second pass: re-read the dump, keeping only non-banned events as
# numpy waveforms.
dumpfile = open("dump.txt","r")
ev_num = 0
dataset = []
events = 0
for line in dumpfile:
    if ev_num not in ban_list :
        value_str_list = (line[:-1]).split(",")
        value_list = []
        for idx in range(N_CHANNELS):
            value_list.append( float( value_str_list[idx] ) )
        dataset .append( np.array( value_list ) )
        events = events + 1
    ev_num = ev_num + 1
dumpfile.close()
# FFT of every retained waveform.
fftset = []
print("Performing fast Fourier transformation")
for event in dataset:
    fftset .append( fft( event ) )
# Plot one raw event; samples are 40 ns apart, so the x axis is time in ns.
ch=[]
vl=[]
idx = 0
EVENT_NUM = 5
for val in dataset[EVENT_NUM]:
    ch.append( 20. + 40.*idx )
    vl.append( val )
    idx += 1
gr = makeGraph(ch,vl)
gr.SetTitle("EVENT #" + str(EVENT_NUM))
gr.GetXaxis().SetTitle("time, ns")
gr.GetXaxis().SetRangeUser(0,20000)
gr.GetYaxis().SetTitle("voltage, a.u.")
gr.Draw("AL")
canv.Draw()
# Magnitude of one frequency bin (channel 16) across all events.
datos = []
for evt_fft in fftset :
    datos.append( np.abs( evt_fft[16] ) / N_CHANNELS )
import statistics as stat
print("mean = " + str(stat.mean(datos)))
print("stdev = " + str(stat.stdev(datos)))
# Average |FFT| spectrum over the first LEN_DRAW frequency channels,
# with the per-channel spread stored as the bin error (VE = value+error).
LEN_DRAW = 100
h_abs = ROOT.TH1F("h_abs",";freq.ch;average abs value",LEN_DRAW,0.5,LEN_DRAW+0.5)
for chan in range(1,LEN_DRAW+1):
    datos = []
    for evt_fft in fftset :
        datos.append( np.abs( evt_fft[chan] ) /N_CHANNELS )
    h_abs[chan] = VE ( stat.mean(datos), stat.stdev(datos)**2 )
h_abs.Draw()
canv.Draw()
1./(len(dataset[5])*40.*1e-9/3.)
#LEN_DRAW = 100
#h_abs2 = ROOT.TH1F("h_abs",";freq., Hz;average abs value",LEN_DRAW,0.5,LEN_DRAW+0.5)
#for chan in range(1,LEN_DRAW+1):
# datos = []
# for evt_fft in fftset :
# datos.append( np.abs( evt_fft[chan] ) /N_CHANNELS )
# h_abs2[chan] = VE ( stat.mean(datos), stat.stdev(datos)**2 )
#h_abs2.Draw()
#canv.Draw()
# Low-pass filter: keep only the first/last CUT bins (low frequencies in the
# two-sided FFT layout), zero the rest, then inverse-FFT and overlay the
# filtered waveform (red) on the raw one.
fh=[]
fq=[]
idx = 0
CUT=300
EVENT_NUM = 5
for val in fftset[EVENT_NUM]:
    if idx<CUT or idx>(len(dataset[5])-CUT):
        fh.append( 0.001/(len(dataset[EVENT_NUM])*40.*1e-9)*idx )
        fq.append( val )
    else:
        fq.append(0.)
    idx += 1
val = ifft(np.array(fq))
gf = makeGraph(ch,list(val))
gf.GetXaxis().SetRangeUser(0,20000)
gf.SetLineColor(2)
gf.Draw("AL")
gr.Draw("same L")
canv.Draw()
```
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow as tf
import numpy as np
# SentencePiece tokenizer and ALBERT config from the pretrained checkpoint.
tokenizer = tokenization.FullTokenizer(
    vocab_file='albert-base-2020-04-10/sp10m.cased.v10.vocab', do_lower_case=False,
    spm_model_file='albert-base-2020-04-10/sp10m.cased.v10.model')
bert_config = modeling.AlbertConfig.from_json_file('albert-base-2020-04-10/config.json')
bert_config
import pickle
# Pre-featurized SQuAD-style test set (features + original examples).
with open('albert-squad-test.pkl', 'rb') as fopen:
    test_features, test_examples = pickle.load(fopen)
# SQuAD-style hyperparameters.
max_seq_length = 384
doc_stride = 128
max_query_length = 64
epoch = 5
batch_size = 22
warmup_proportion = 0.1
n_best_size = 20
num_train_steps = int(len(test_features) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
from tensorflow.contrib import layers as contrib_layers
class Model:
    """Thin wrapper around AlbertModel exposing the placeholders used by
    the SQuAD head built in the cells below."""
    def __init__(self, is_training = True):
        # Token ids / segment ids / attention mask, shape [batch, seq_len].
        self.X = tf.placeholder(tf.int32, [None, None])
        self.segment_ids = tf.placeholder(tf.int32, [None, None])
        self.input_masks = tf.placeholder(tf.int32, [None, None])
        # Gold answer span (used by the training branch only).
        self.start_positions = tf.placeholder(tf.int32, [None])
        self.end_positions = tf.placeholder(tf.int32, [None])
        # 1 for positions that may NOT contain an answer (masked out below).
        self.p_mask = tf.placeholder(tf.int32, [None, None])
        self.is_impossible = tf.placeholder(tf.int32, [None])
        model = modeling.AlbertModel(
            config=bert_config,
            is_training=is_training,
            input_ids=self.X,
            input_mask=self.input_masks,
            token_type_ids=self.segment_ids,
            use_one_hot_embeddings=False)
        # Per-token hidden states from the encoder.
        final_hidden = model.get_sequence_output()
        self.output = final_hidden
learning_rate = 2e-5
# Beam sizes for start/end position search at inference time.
start_n_top = 5
end_n_top = 5
is_training = False
tf.reset_default_graph()
model = Model(is_training = is_training)
output = model.output
bsz = tf.shape(output)[0]
return_dict = {}
# Work in [seq_len, batch, hidden] layout for the einsum-based head below.
output = tf.transpose(output, [1, 0, 2])
# invalid position mask such as query and special symbols (PAD, SEP, CLS)
p_mask = tf.cast(model.p_mask, dtype = tf.float32)
# logit of the start position
with tf.variable_scope('start_logits'):
    start_logits = tf.layers.dense(
        output,
        1,
        kernel_initializer = modeling.create_initializer(
            bert_config.initializer_range
        ),
    )
    # Back to [batch, seq_len].
    start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])
    # -1e30 effectively removes masked positions from the softmax.
    start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask
    start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)
# logit of the end position
with tf.variable_scope('end_logits'):
    if is_training:
        # during training, compute the end logits based on the
        # ground truth of the start position
        start_positions = tf.reshape(model.start_positions, [-1])
        start_index = tf.one_hot(
            start_positions,
            depth = max_seq_length,
            axis = -1,
            dtype = tf.float32,
        )
        # Gather the hidden state at the gold start position.
        start_features = tf.einsum('lbh,bl->bh', output, start_index)
        start_features = tf.tile(
            start_features[None], [max_seq_length, 1, 1]
        )
        end_logits = tf.layers.dense(
            tf.concat([output, start_features], axis = -1),
            bert_config.hidden_size,
            kernel_initializer = modeling.create_initializer(
                bert_config.initializer_range
            ),
            activation = tf.tanh,
            name = 'dense_0',
        )
        end_logits = contrib_layers.layer_norm(
            end_logits, begin_norm_axis = -1
        )
        end_logits = tf.layers.dense(
            end_logits,
            1,
            kernel_initializer = modeling.create_initializer(
                bert_config.initializer_range
            ),
            name = 'dense_1',
        )
        end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])
        end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask
        end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
    else:
        # during inference, compute the end logits based on beam search
        start_top_log_probs, start_top_index = tf.nn.top_k(
            start_log_probs, k = start_n_top
        )
        start_index = tf.one_hot(
            start_top_index,
            depth = max_seq_length,
            axis = -1,
            dtype = tf.float32,
        )
        # One start-feature per beam candidate (k dimension).
        start_features = tf.einsum('lbh,bkl->bkh', output, start_index)
        end_input = tf.tile(output[:, :, None], [1, 1, start_n_top, 1])
        start_features = tf.tile(
            start_features[None], [max_seq_length, 1, 1, 1]
        )
        end_input = tf.concat([end_input, start_features], axis = -1)
        end_logits = tf.layers.dense(
            end_input,
            bert_config.hidden_size,
            kernel_initializer = modeling.create_initializer(
                bert_config.initializer_range
            ),
            activation = tf.tanh,
            name = 'dense_0',
        )
        end_logits = contrib_layers.layer_norm(
            end_logits, begin_norm_axis = -1
        )
        end_logits = tf.layers.dense(
            end_logits,
            1,
            kernel_initializer = modeling.create_initializer(
                bert_config.initializer_range
            ),
            name = 'dense_1',
        )
        end_logits = tf.reshape(
            end_logits, [max_seq_length, -1, start_n_top]
        )
        end_logits = tf.transpose(end_logits, [1, 2, 0])
        end_logits_masked = (
            end_logits * (1 - p_mask[:, None]) - 1e30 * p_mask[:, None]
        )
        end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
        end_top_log_probs, end_top_index = tf.nn.top_k(
            end_log_probs, k = end_n_top
        )
        # Flatten the (start beam x end beam) grid per example.
        end_top_log_probs = tf.reshape(
            end_top_log_probs, [-1, start_n_top * end_n_top]
        )
        end_top_index = tf.reshape(
            end_top_index, [-1, start_n_top * end_n_top]
        )
# Export the tensors relevant to the chosen mode.
if is_training:
    return_dict['start_log_probs'] = start_log_probs
    return_dict['end_log_probs'] = end_log_probs
else:
    return_dict['start_top_log_probs'] = start_top_log_probs
    return_dict['start_top_index'] = start_top_index
    return_dict['end_top_log_probs'] = end_top_log_probs
    return_dict['end_top_index'] = end_top_index
# an additional layer to predict answerability
with tf.variable_scope('answer_class'):
    # get the representation of CLS
    cls_index = tf.one_hot(
        tf.zeros([bsz], dtype = tf.int32),
        max_seq_length,
        axis = -1,
        dtype = tf.float32,
    )
    cls_feature = tf.einsum('lbh,bl->bh', output, cls_index)
    # get the representation of START
    start_p = tf.nn.softmax(
        start_logits_masked, axis = -1, name = 'softmax_start'
    )
    start_feature = tf.einsum('lbh,bl->bh', output, start_p)
    # note(zhiliny): no dependency on end_feature so that we can obtain
    # one single `cls_logits` for each sample
    ans_feature = tf.concat([start_feature, cls_feature], -1)
    ans_feature = tf.layers.dense(
        ans_feature,
        bert_config.hidden_size,
        activation = tf.tanh,
        kernel_initializer = modeling.create_initializer(
            bert_config.initializer_range
        ),
        name = 'dense_0',
    )
    ans_feature = tf.layers.dropout(
        ans_feature, bert_config.hidden_dropout_prob, training = is_training
    )
    cls_logits = tf.layers.dense(
        ans_feature,
        1,
        kernel_initializer = modeling.create_initializer(
            bert_config.initializer_range
        ),
        name = 'dense_1',
        use_bias = False,
    )
    cls_logits = tf.squeeze(cls_logits, -1)
    return_dict['cls_logits'] = cls_logits
seq_length = tf.shape(model.X)[1]
cls_logits = return_dict['cls_logits']
is_impossible = tf.reshape(model.is_impossible, [-1])
# Restore the fine-tuned SQuAD checkpoint (trainable variables only).
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_list = tf.trainable_variables())
saver.restore(sess, 'albert-base-squad/model.ckpt')
import bert_utils as squad_utils
from tqdm import tqdm
all_results = []
pbar = tqdm(
range(0, len(test_features), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
batch = test_features[i: i + batch_size]
batch_ids = [b.input_ids for b in batch]
batch_masks = [b.input_mask for b in batch]
batch_segment = [b.segment_ids for b in batch]
batch_start = [b.start_position for b in batch]
batch_end = [b.end_position for b in batch]
is_impossible = [b.is_impossible for b in batch]
p_mask = [b.p_mask for b in batch]
o = sess.run(
[start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits],
feed_dict = {
model.X: batch_ids,
model.segment_ids: batch_segment,
model.input_masks: batch_masks,
model.p_mask: p_mask
},
)
for no, b in enumerate(batch):
start_top_log_probs_ = (
[float(x) for x in o[0][no].flat])
start_top_index_ = [int(x) for x in o[1][no].flat]
end_top_log_probs_ = (
[float(x) for x in o[2][no].flat])
end_top_index_ = [int(x) for x in o[3][no].flat]
cls_logits_ = float(o[4][no].flat[0])
all_results.append(squad_utils.RawResultV2(
unique_id=b.unique_id,
start_top_log_probs=start_top_log_probs_,
start_top_index=start_top_index_,
end_top_log_probs=end_top_log_probs_,
end_top_index=end_top_index_,
cls_logits=cls_logits_))
n_best_size = 20
max_answer_length = 30
result_dict = {}
cls_dict = {}
squad_utils.accumulate_predictions_v2(
result_dict, cls_dict, test_examples, test_features,
all_results, n_best_size, max_answer_length,
start_n_top, end_n_top)
import json
with open('/home/husein/pure-text/ms-dev-2.0.json') as predict_file:
prediction_json = json.load(predict_file)["data"]
output_prediction_file = 'predict.json'
output_nbest_file = 'nbest_predictions.json'
output_null_log_odds_file = 'null_odds.json'
squad_utils.evaluate_v2(
result_dict, cls_dict, prediction_json, test_examples,
test_features, all_results, n_best_size,
max_answer_length, output_prediction_file, output_nbest_file,
output_null_log_odds_file)
```
| github_jupyter |
# Astronomy 8824 - Problem Set 5
The goal of this problem set is to gain familiarity with Fisher Matrix Forecasts.
This problem set was developed by David Weinberg, with some modifications by Paul Martini.
```
import numpy as np
from numpy import matrix
from numpy import linalg
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from astropy.io import ascii
# matplotlib settings
# Shared font-size / line-width defaults applied to every plot below.
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE)    # fontsize of the x and y labels
plt.rc('lines', linewidth=2)
plt.rc('axes', linewidth=2)
plt.rc('xtick', labelsize=MEDIUM_SIZE)   # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE)   # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE)   # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
def gaussian(x, mu, sig):
    """Evaluate the normalized Gaussian pdf with mean `mu` and dispersion `sig` at `x`."""
    z = (x - mu) / sig
    norm = np.sqrt(2 * np.pi) * sig
    return np.exp(-0.5 * z * z) / norm
def PlotTwoDist(xy1, label1, xy2=None, label2=None, dims=None, addgauss=False, gxsig=False, gysig=False, xy2hist=True, axes=["X", "Y"], connect=20):
    '''
    Scatter plot of one or two (x, y) distributions with marginal histograms.

    xy1, xy2: (x,y) points for the two distributions
    label1, label2: labels for the two distributions
    dims: [xc, yc, dx, dy] where xc,yc are the plot center and dx, dy are the half sizes of the plot
    addgauss: (bool) True to overplot Gaussian on each histogram
    xy2hist: (bool) Plot histograms for second distribution
    axes: ["X", "Y"] Labels for axes
    connect: (default 20) Initial set of points to connect with a solid line
    gxsig, gysig: (float) sigma values for the overplotted Gaussians
    '''
    if dims is not None:
        xc = dims[0]
        yc = dims[1]
        dx = dims[2]
        dy = dims[3]
        xbins = np.arange(xc - dx, xc + dx, 0.05*dx)
        ybins = np.arange(yc - dy, yc + dy, 0.05*dy)
    else:
        # bug fix: empty bin lists crash plt.hist(); None lets matplotlib
        # pick a default binning instead
        xbins = None
        ybins = None
    fig = plt.figure(figsize=(8, 8))
    gs = GridSpec(4, 4)
    ax_scatter = fig.add_subplot(gs[1:4, 0:3])
    ax_xhist = fig.add_subplot(gs[0, 0:3])
    ax_yhist = fig.add_subplot(gs[1:4, 3])
    ax_scatter.scatter(xy1[0], xy1[1], color='k', s=1, label=label1)
    ax_xhist.hist(xy1[0], bins=xbins, histtype='step', color='k', density=True)
    ax_yhist.hist(xy1[1], bins=ybins, histtype='step', color='k', density=True, orientation='horizontal')
    if xy2 is not None:
        ax_scatter.scatter(xy2[0], xy2[1], color='r', s=1, label=label2)
        # connect the first `connect` points to show the chain's path
        ax_scatter.plot(xy2[0][:connect], xy2[1][:connect], color='r', ls='-')
        if xy2hist:
            # bug fix: the x histogram was drawn twice and the y histogram
            # used xbins; draw each marginal once with its own bins
            ax_xhist.hist(xy2[0], bins=xbins, histtype='step', color='r', density=True)
            ax_yhist.hist(xy2[1], bins=ybins, histtype='step', color='r', density=True, orientation='horizontal')
    if addgauss and gxsig and gysig:
        if dims is not None:
            ggxx = np.linspace(xc - dx, xc + dx, 100)
            ggx = gaussian(ggxx, xc, gxsig)
            ax_xhist.plot(ggxx, ggx)
            ggyy = np.linspace(yc - dy, yc + dy, 100)
            ggy = gaussian(ggyy, yc, gysig)
            ax_yhist.plot(ggy, ggyy)
        else:
            # bug fix: `size` was undefined here; derive the plotting span
            # from the larger of the two sigmas
            span = 4.0 * max(gxsig, gysig)
            gg = np.linspace(-1*span, span, 100)
            ggx = gaussian(gg, 0., gxsig)
            ggy = gaussian(gg, 0., gysig)
            ax_xhist.plot(gg, ggx)
            ax_yhist.plot(ggy, gg)
    plt.setp(ax_xhist.get_xticklabels(), visible=False)
    plt.setp(ax_xhist.get_yticklabels(), visible=False)
    plt.setp(ax_yhist.get_xticklabels(), visible=False)
    plt.setp(ax_yhist.get_yticklabels(), visible=False)
    ax_scatter.set_xlabel(axes[0], fontsize=16)
    ax_scatter.set_ylabel(axes[1], fontsize=16)
    if dims is not None:
        ax_scatter.set_xlim(xc - dx, xc + dx)
        ax_scatter.set_ylim(yc - dy, yc + dy)
        ax_xhist.set_xlim(xc - dx, xc + dx)
        ax_yhist.set_ylim(yc - dy, yc + dy)
    ax_xhist.set_ylabel("N", fontsize=16)
    ax_yhist.set_xlabel("N", fontsize=16)
    ax_scatter.legend(frameon=False, fontsize=16)
def prob(x, cinv, prefac):
    """
    Multivariate Gaussian probability density.

    x = vector of data values (matrix)
    cinv = inverse covariance matrix
    prefac = normalization prefactor, should be
             [(2\pi)^{M/2} \sqrt{det(C)}]^{-1} (float); passed in so it
             isn't recomputed on every call
    """
    quad_form = float(x * cinv * x.T)
    return prefac * np.exp(-0.5 * quad_form)
def rundmc(data, initvals, stepvals, nchain, nthin=1, cov=None, intype=1):
    '''
    Metropolis MCMC sampler for the parameters of y = t1 + t2*x + t3*x^2.

    data : filename (intype=1) or (x, y, errors) arrays (intype=2)
    initvals, stepvals : arrays of initial values and step sizes. Each array is
        3 elements. To only sample two parameters, set the last element of
        stepvals to be zero.
    nchain : number of steps
    nthin : thin the chain by this amount
    intype = 1 (datafile) or 2 (data)
    cov = input covariance matrix, will be diagonal if cov=None

    Returns the (thinned) chain; each row is (theta1, theta2, theta3, p).
    '''
    if intype == 1:
        x, y, errors = np.loadtxt(data, unpack=True)
    elif intype == 2:
        x, y, errors = data[0], data[1], data[2]
    else:
        print("Error with {} format".format(data))
    t1 = initvals[0]
    t2 = initvals[1]
    t3 = initvals[2]
    step1 = stepvals[0]
    step2 = stepvals[1]
    step3 = stepvals[2]
    if cov is None:
        # bug fix: the covariance of independent errors is diag(sigma^2),
        # not diag(sigma) -- identical when sigma = 1 as in the data files
        cov = np.diag(np.asarray(errors) ** 2)
    cinv = np.linalg.inv(cov)
    # normalization constant is irrelevant for the Metropolis ratio but kept
    # so the stored p values are comparable across steps
    prefac = 1. / (2 * np.pi * np.sqrt(np.linalg.det(cov)))
    deltay = y - (t1 + t2*x + t3*x*x)
    dym = matrix(deltay)
    p1 = prob(dym, cinv, prefac)  # probability at starting point
    chain = np.zeros((nchain, 4))  # rows are (theta1, theta2, theta3, p)
    chain[0] = (t1, t2, t3, p1)
    ichain = 1
    naccept = 0
    while (ichain < nchain):
        # Gaussian proposal around the current point
        t1trial = t1 + step1*np.random.normal()
        t2trial = t2 + step2*np.random.normal()
        t3trial = t3 + step3*np.random.normal()
        deltay = y - (t1trial + t2trial*x + t3trial*x*x)
        dym = matrix(deltay)
        p2 = prob(dym, cinv, prefac)
        # Metropolis acceptance; the p1==0 guard avoids dividing by zero
        if ((p2 > p1 or p1 == 0) or np.random.random() < p2/p1):
            chain[ichain] = (t1trial, t2trial, t3trial, p2)
            t1 = t1trial
            t2 = t2trial
            t3 = t3trial
            p1 = p2
            naccept += 1
        else:
            # bug fix: on rejection the chain stays at the current point,
            # so record its probability p1, not the rejected p2
            chain[ichain] = (t1, t2, t3, p1)
        ichain += 1
    xaccept = float(naccept) / float(ichain)
    print('%.4f of trials accepted' % (xaccept))
    return chain[::nthin]
```
LaTex macros hidden here --
$\newcommand{\xhat}{\hat{x}}$
$\newcommand{\xmin}{x_{min}}$
$\newcommand{\xmax}{x_{max}}$
$\newcommand{\cinvkl}{C_{kl}^{-1}}$
### 1. Fisher Matrix Forecast, linear fit
Suppose you have 20 (x,y) data points generated from a linear relation y = $\theta_1 + \theta_2 x$ with x uniformly distributed in the range $5 < x < 20$ and independent Gaussian errors on y with standard deviation $\sigma = 1$.
#### a. What is the Fisher matrix?
$$
F_{ij} = \left< \frac{\partial^2 ln L}{\partial\theta_i \partial\theta_j} \right> ?
$$
Express your answer in terms of $\sigma$ and $x$.
#### Answer
#### b) What is the inverse of the Fisher matrix?
Express your answer in terms of the $\sigma$, $x$, and the minimum and maximum value $x_{min}$ and $x_{max}$. _Hint:_ The expectation value is $<x> = (x_{max} + x_{min})/2.$
#### Answer
#### c) If both the intercept $\theta_1$ and the slope $\theta_2$ are to be estimated by fitting the data, what are the expected errors on $\theta_1$ and $\theta_2$?
Use your results from b) to compute numerical values for $\Delta \theta_1$ and $\Delta \theta_2$.
#### Answer
#### d) If the slope $\theta_2$ is known and you only need to solve for $\theta_1$, what is the expected error?
#### Answer
#### e) How does the expected slope error change if N = 6 instead of N = 20? How does the expected slope error change if xmax = 15 instead of xmax = 20?
#### Answer
### 2. MCMC parameters of a linear fit
The repository includes the data files:
- line.n20.s12.dat
- line.n20.s17.dat
- line.n20.s0.dat
- line.n6.s0.dat
These files have (x, y, σ) data points, with σ = 1 in all cases and x evenly spaced in the range 5 – 20. They were generated by David's program linedata.py (also included), which you should look at to check that you understand what it is doing. For the two files labeled ‘s0.dat’ the points have been forced to lie exactly on the prescribed line.
This notebook includes the function rundmc(), which reads a data file in this format and generates an MCMC for the intercept and slope (θ1, θ2) of a linear fit. You can either use this code or refer to it and write your own. Note that the probability is proportional to exp(-$\chi^2$/2), and you do not have to compute the constant of proportionality because you only need ratios of probabilities for your MCMC.
```
filenames = ['line.n20.s12.dat', 'line.n20.s17.dat', 'line.n20.s0.dat', 'line.n6.s0.dat']
data1 = ascii.read(filenames[0])
data2 = ascii.read(filenames[1])
data3 = ascii.read(filenames[2])
data4 = ascii.read(filenames[3])
```
#### a) For the first two files, compute the best-fit slope and intercept using the formulas we have discussed in class (and are given in Numerical Recipes).
```
### Answer
```
#### b) Generate an MCMC chain for the first 3 files (with N=20). Plot (theta1, theta2) for each and compare the marginal distributions of (theta1, theta2) to the Gaussian distributions with the errors you predicted from Part 1.
#### Answer
#### c) Plot instead the distribution of ($\theta_1 + 12.5 \theta_2, \theta_2$). Comment on the result. Relate your interpretation of this result to the Fisher matrix (think particularly about the moments that enter there).
#### Answer
#### d) Repeat b) for line.n6.s0.dat
#### Answer
### 3. A Third Parameter
Suppose that with the same (N = 20) data you allow a third parameter with a quadratic term, $y = \theta_1 + \theta_2 x + \theta_3 x^2$. For the fiducial model being assumed for the forecast, you adopt $\theta_3 = 0$, but you allow it to be free in the fit.
#### a) What is the Fisher Matrix in this case? (You can do the matrix inversion numerically.) What are the forecast errors on $\theta_1, \theta_2, \theta_3$?
#### Answer
#### b) Use rundmc() or your own code to create a chain for this 3-parameter model. Apply it to the files line.n20.s0.dat and line.n6.s0.dat and plot the results, with particular attention to $\theta_3$ vs. $\theta_2$. Compare the errors from MCMC to your Fisher matrix forecast.
#### Answer
### 4. Correlated Errors
The code linepluscov() generates a distribution of points with correlated errors. Run the code for 20 points in the range x = 5 – 20 with a slope $\theta_2 = 0.5$ and intercept $\theta_1 = 2$ for the random number seeds 12 and 17 used previously for the diagonal case. For this problem we are changing the slope from $\theta_2 = 2$ to $\theta_2 = 0.5$ while keeping $\sigma = 1$. This shrinks the vertical scale relative to the error bar, making the effect of correlations easier to see.
```
def linepluscov(xmin, xmax, npoints, a, b, seed):
    '''
    Generate points on the line y = a*x + b with errors drawn from a
    multivariate Gaussian for several covariance matrices.

    Parameters:
    xmin,xmax = range of x values
    npoints = number of points, evenly distributed in xmin,xmax
    a, b = slope and intercept of line
    seed = random number seed

    Returns a dict keyed 'A'..'E'; each value is an (npoints, 3) array with
    columns (x, y + noise, sigma):
      A: independent errors (diagonal covariance)
      B: tridiagonal covariance, off-diagonal +0.2
      C: tridiagonal covariance, off-diagonal -0.2
      D: tridiagonal covariance, off-diagonal +0.4
      E: covariance 0.4 between every pair of distinct points

    Written by DHW, modified for notebook by PM
    '''
    sigma = 1.0
    np.random.seed(seed)
    output = {}
    x = np.linspace(xmin, xmax, npoints)
    y = a*x + b
    errors = sigma*np.ones(npoints)
    mu = np.zeros(npoints)
    cov = np.diag(errors)

    def realize(cov1):
        # one noisy realization of the line under covariance cov1
        delta = np.random.multivariate_normal(mu, cov1)
        return np.transpose([x, y + delta, errors])

    # NOTE: the draws happen in the same fixed order (A, B, C, D, E) as the
    # original repeated code, so results for a given seed are unchanged.
    output['A'] = realize(cov)
    # tridiagonal cases: +/- coupling between neighbouring points
    for key, offd in (('B', 0.2), ('C', -0.2), ('D', 0.4)):
        band = offd*np.ones(npoints - 1)
        output[key] = realize(cov + np.diag(band, 1) + np.diag(band, -1))
    # fully correlated case: the same off-diagonal value everywhere
    offd = 0.4
    output['E'] = realize(offd*np.ones((npoints, npoints)) + np.diag(errors - offd))
    return output
# Generate two realizations
# Same line (slope 0.5, intercept 2, N = 20 points on 5-20), two seeds.
output12 = linepluscov(5, 20, 20, 0.5, 2, 12)
output17 = linepluscov(5, 20, 20, 0.5, 2, 17)
```
#### a) What covariance matrices are being used for the five sets of data points (A, B, C, D, E) that the code produces? (Look at the code to figure out what it is doing.)
#### Answer
#### b) Plot the two realizations of N = 20 points for each of the five cases, attaching error bars, and including the y = 0.5 x + 2 line
```
#### Answer
```
### 5. (OPTIONAL) Parameter errors with correlated data errors
#### a) Compute the predicted errors on $\theta_1$ and $\theta_2$ for each case from Part 4. You’ll now need to compute the Fisher matrix using the expression with the full covariance matrix (Stats Notes 4, page 5) and invert it numerically.
```
#### Answer
# Fiducial setup for the N = 20 linear model: y = 0.5*x + 2 with sigma = 1.
sigma = 1.0
N = 20
xmin = 5
xmax = 20
a = 0.5
b = 2.
x = np.linspace(xmin, xmax, N)
y = a*x + b
errors = sigma*np.ones(N)
mu = np.zeros(N)
# Case A: independent errors (diagonal covariance).
covA = np.diag(errors)
# Case B: neighbouring points positively correlated (+0.2 off-diagonal).
offd = 0.2
b = offd*np.ones(N-1)
covB = covA + np.diag(b, 1) + np.diag(b, -1)
# Case C: neighbouring points anti-correlated (-0.2 off-diagonal).
offd = -0.2
b = offd*np.ones(N-1)
covC = covA + np.diag(b, 1) + np.diag(b, -1)
# Case D: stronger positive neighbour correlation (+0.4 off-diagonal).
offd = 0.4
b = offd*np.ones(N-1)
covD = covA + np.diag(b, 1) + np.diag(b, -1)
# Case E: every pair of points correlated at 0.4.
offd = 0.4
covE = offd*np.ones((N, N)) + np.diag(errors-offd)
def get_fisher(cov, x, label):
    '''
    Fisher matrix for the two-parameter linear model y = t1 + t2*x with
    (possibly correlated) data errors: F = A^T C^-1 A, where the design
    matrix A has columns (dy/dt1, dy/dt2) = (1, x).

    cov : (N, N) data covariance matrix
    x : (N,) array of x values
    label : name printed alongside the forecast parameter errors

    Returns (fish, fishinv); the forecast 1-sigma errors on (t1, t2) are
    sqrt of the diagonal of fishinv.
    '''
    design = np.column_stack((np.ones_like(x), x))
    cinv = linalg.inv(cov)
    fish = design.T @ cinv @ design
    fishinv = linalg.inv(fish)
    # marginalized errors: sqrt of the diagonal of the inverse Fisher matrix
    sig1 = np.sqrt(fishinv[0, 0])
    sig2 = np.sqrt(fishinv[1, 1])
    print('{}: {:.4f}, {:.4f}'.format(label, sig1, sig2))
    return fish, fishinv
print("sigma1, sigma2")
fishA, fishAinv = get_fisher(covA, x, "Case A")
fishB, fishBinv = get_fisher(covB, x, "Case B")
fishC, fishCinv = get_fisher(covC, x, "Case C")
fishD, fishDinv = get_fisher(covD, x, "Case D")
fishE, fishEinv = get_fisher(covE, x, "Case E")
```
#### Answer
#### b) How do the correlated errors affect the expected parameter errors? Does the behavior make sense?
#### Answer
#### c) For D and E, check the Fisher matrix against the MCMC
#### Answer
| github_jupyter |
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from riverreliability.plots import posterior_reliability_diagram
from riverreliability import metrics
# Training hyperparameters for ResNet152V2 on CIFAR-10.
LEARNING_RATE = .1
LR_DROP = 10000
MOMENTUM = .9
LR_DECAY = .96
EPOCHS = 250
# ResNet152V2 trained from scratch (weights=None) on 32x32x3 inputs.
model = tf.keras.applications.ResNet152V2(
    include_top=True,
    weights=None,
    classes=10,
    input_shape=[32, 32, 3]
)
# Step-wise exponential learning-rate decay every LR_DROP optimizer steps.
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    LEARNING_RATE,
    decay_steps=LR_DROP,
    decay_rate=LR_DECAY,
    staircase=True)
# NOTE(review): MOMENTUM is defined above but not passed to SGD — confirm
# whether momentum was meant to be used here.
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=['accuracy'])
# Load CIFAR-10 and standardize with the global training mean/std.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
mean = x_train.mean()
std = x_train.std()
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# Standard CIFAR augmentation: small rotations/shifts plus horizontal flips.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    featurewise_center=False, # set input mean to 0 over the dataset
    samplewise_center=False, # set each sample mean to 0
    featurewise_std_normalization=False, # divide inputs by std of the dataset
    samplewise_std_normalization=False, # divide each input by its std
    zca_whitening=False, # apply ZCA whitening
    rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
    horizontal_flip=True, # randomly flip images
    vertical_flip=False) # randomly flip images
datagen.fit(x_train)
# Train on augmented batches, validating on the un-augmented test set.
model.fit(datagen.flow(x_train, y_train, batch_size=128),
          epochs=EPOCHS,
          validation_data=(x_test, y_test))
model.save('models/cifar10_resnet.h5')
model = tf.keras.models.load_model('models/cifar10_resnet.h5')
# Predicted probabilities, hard labels and ground truth for the test set.
z_test = model.predict(x_test, batch_size=128, verbose=1)
y_probs = z_test.max(axis=1)
y_preds = z_test.argmax(axis=1)
y_true = y_test.argmax(axis=1)
acc = (y_preds == y_true).mean()
print(f'Accuracy: {acc}')
# Calibration metrics from riverreliability.
ece = metrics.ece(y_probs, y_preds, y_true)
print(f'ECE : {ece}')
peace = metrics.peace(y_probs, y_preds, y_true)
print(f'PEACE : {peace}')
# Posterior reliability diagram for the trained model.
ax = plt.gca()
plt.title('ResNet152 on CIFAR-10')
posterior_reliability_diagram(y_probs, y_preds, y_true, ax, bins=10)
plt.savefig('plots/resnet.pdf')
plt.show()
```
| github_jupyter |
```
# Load packages
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import os
import scipy as scp
import scipy.stats as scps
from datetime import datetime
# Load my own functions
import dnnregressor_train_eval_keras as dnnk
import make_data_wfpt as mdw
# Load data
data = pd.read_csv(os.getcwd() + '/data_storage/data_11000000_from_simulation_mix_09_12_18_18_20_50.csv')
# Some cleaning of the data: keep only the model columns and clip extreme
# parameter values (presumably w = starting point, a = boundary separation
# of the diffusion model — TODO confirm against make_data_wfpt).
data = data[['v', 'a', 'w', 'rt', 'choice', 'nf_likelihood']]
data = data.loc[data['w'] > 0.1]
data = data.loc[data['w'] < 0.9]
data = data.loc[data['a'] > 0.5]
# Small subset, handy for quick smoke tests.
mini_data = data.loc[1:10000]
# 80/20 train/test split of (features, labels) for the Keras backend.
train_f, train_l, test_f, test_l = mdw.train_test_split_rt_choice(data = data,
                                                                  write_to_file = False,
                                                                  from_file = False,
                                                                  p_train = 0.8,
                                                                  backend = 'keras')
# Choice probabilities
# train_f, train_l, test_f, test_l = mdw.train_test_from_file_choice_probabilities(n_samples = 2500000,
#                                                                                  f_signature = '_choice_probabilities_analytic_',
#                                                                                  backend = 'keras')
# rt_choice
# train_f, train_l, test_f, test_l = mdw.train_test_from_file_rt_choice(n_samples = 11000000,
#                                                                       f_signature = '_from_simulation_mix_',
#                                                                       backend = 'keras')
# Make dnnk class (cpm for choice probability model)
cpm = dnnk.dnn_trainer()
cpm.data['train_features'] = train_f
cpm.data['train_labels'] = train_l
cpm.data['test_features'] = test_f
cpm.data['test_labels'] = test_l
# Make all parameters we can specify explicit (inspect the defaults)
# Model parameters
cpm.model_params
# Parameters governing training
cpm.train_params
# Parameters concerning data storage
cpm.data_params
# SPECIFYING META PARAMETERS THAT STAY CONSTANT DURING HYPERPARAMETER OPTIMIZATION
# Model params
cpm.model_params['output_activation'] = 'linear'
cpm.model_params['input_shape'] = 5
# Training params
# Meta
cpm.train_params['early_stopping_patience'] = 5
cpm.train_params['plateau_patience'] = 3
cpm.train_params['min_delta'] = 0.05
cpm.train_params['ckpt_period'] = 1
cpm.train_params['model_cnt'] = 0
cpm.train_params['max_train_epochs'] = 25
# Hyper (these get overwritten inside the search loop below)
#cpm.train_params['l1_kernel']
cpm.model_params['hidden_layers'] = [5, 5, 5, 5]
#cpm.train_params['hidden_activations']
#cpm.train_params['l2_kernel'] = [0.5, 0.5, 0.5, 0.5]
#cpm.train_params['l2_activation'] = [0.5, 0.5, 0.5, 0.5]
# Data params
cpm.data_params['data_type'] = 'wfpt'
cpm.data_params['data_type_signature'] = '_choice_rt_'
cpm.data_params['training_data_size'] = 11000000
# Update timestamp
cpm.data_params['timestamp'] = datetime.now().strftime('%m_%d_%y_%H_%M_%S')
# Make model
# cpm.keras_model_generate(save_model = True)
# Train model
# cpm.run_training(save_history = True,
#                  warm_start = False)
# Hyperparameter training loop:
# Random search: each run samples layer count/sizes, regularizer type and
# strength, and batch size, then trains one model with that configuration.
# Runs:
num_runs = 25
cnt = 0
max_layers = 5
layer_sizes = [10, 20, 50]
batch_sizes = [1000, 10000, 50000]
regularization_sizes = [0.05, 0.1, 0.2]
# Update model directory to make sure we collect all our models from this hyperparameter optimization run in the same place
cpm.data_params['model_directory'] = '/home/afengler/git_repos/nn_likelihoods/keras_models/'
cpm.data_params['model_name'] = 'dnnregressor_wftp_hyp_opt'
cpm.train_params['model_cnt'] = 0
# NOTE(review): `histories` is never appended to in this loop (the history
# bookkeeping below is commented out) — confirm this is intended.
histories = []
while cnt < num_runs:
    cnt += 1
    # Sample # layers (1 .. max_layers-1)
    num_layers = np.random.choice(np.arange(1, max_layers, 1))
    # Layer sizes / activations / per-layer regularizer strengths
    layers = []
    activations = []
    regularizers_l1 = []
    regularizers_l2 = []
    # one regularizer type and strength shared by all layers of this run
    regularizer = np.random.choice(['l1', 'l2'])
    regularizer_size = np.random.choice(regularization_sizes)
    for i in range(0, num_layers, 1):
        layers.append(np.random.choice(layer_sizes))
        activations.append('relu')
        if regularizer == 'l1':
            regularizers_l1.append(regularizer_size)
            regularizers_l2.append(0.0)
        else:
            regularizers_l1.append(0.0)
            regularizers_l2.append(regularizer_size)
    # Batch size
    batch_size = np.random.choice(batch_sizes)
    # Update relevant model parameters
    cpm.train_params['batch_size'] = batch_size
    print('batch_size: ', batch_size)
    cpm.model_params['hidden_layers'] = layers
    print('layers: ', layers)
    cpm.model_params['hidden_activations'] = activations
    print('hidden_activations:', activations)
    # cpm.model_params['l1_activation'] = regularizers_l1
    # print('l1_activatons: ', regularizers_l1)
    # cpm.model_params['l2_activation'] = regularizers
    # print('l2_activations:', regularizers_l2)
    cpm.model_params['l1_kernel'] = regularizers_l1
    print('l1_kernel: ', regularizers_l1)
    cpm.model_params['l2_kernel'] = regularizers_l2
    print('l2_kernel: ', regularizers_l2)
    # Make new timestamp
    #cpm.data_params['timestamp'] = datetime.now().strftime('%m_%d_%y_%H_%M_%S')
    # Make model
    cpm.keras_model_generate(save_model = True)
    # Train model
    cpm.run_training(save_history = True,
                     warm_start = False) # Note that this increments model count automatically !
    # histories[-1]['model_cnt'] = cpm.train_params['model_cnt']
    # histories[-1]['num_layers'] = num_layers
    # histories[-1]['size_layers'] = str(layers)
    # histories[-1]['activations'] = str(activations)
    # histories[-1]['batch_size'] = batch_size
    print(cnt)
# histories = pd.concat(histories)
# histories['optimizer'] = cpm.model_params['optimizer']
# histories['timestamp'] = datetime.now().strftime('%m_%d_%y_%H_%M_%S')
# histories.to_csv(cpm.data_params['model_directory'] + cpm.data_params['model_name'] + '_choice_rt_' +\
#                  cpm.data_params['timestamp'] + '/hyp_opt_histories.csv')
```
| github_jupyter |
# PyTorch
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# Training configuration for the MNIST CNN below.
seed = 1
lr = 0.001
momentum = 0.5
batch_size = 64        # NOTE(review): the DataLoaders below use batch_size=2, not this — confirm
test_batch_size = 64
epochs = 5
no_cuda = False        # set True to force CPU even when CUDA is available
log_interval = 100     # batches between training progress prints
```
## Model
```
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 3-channel inputs; returns log-probabilities over 10 classes."""

    def __init__(self):
        super().__init__()
        # two conv blocks; each is followed by a 2x2 max-pool in forward()
        self.conv1 = nn.Conv2d(3, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # spatial size: 28 -> conv5 -> 24 -> pool -> 12 -> conv5 -> 8 -> pool -> 4
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = self.fc2(F.relu(self.fc1(out)))
        return F.log_softmax(out, dim=1)
```
## Preprocess
```
# MNIST PNGs arranged one sub-folder per class (torchvision ImageFolder layout).
train_dir = '../../Fast Campus/dataset/mnist_png/training/'
test_dir = '../../Fast Campus/dataset/mnist_png/testing/'
```
Why loading the images as grayscale does not work here:
https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L157
```
torch.manual_seed(seed)
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# ImageFolder loads the PNGs as 3-channel RGB images, which is why the model
# above uses Conv2d(3, ...) (see the linked torchvision loader note).
train_dataset = datasets.ImageFolder(root=train_dir,
                                     transform=transforms.Compose([
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.1307,), (0.3081,))
                                     ]))
test_dataset = datasets.ImageFolder(root=test_dir,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),
                                        transforms.Normalize((0.1307,), (0.3081,))
                                    ]))
# NOTE(review): batch_size=2 here ignores the batch_size/test_batch_size
# constants defined earlier — confirm intended.
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=2,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=2
                                          )
```
## Optimization
```
# Instantiate the model on the chosen device with plain SGD + momentum.
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
```
## Training
```
# One train pass + one evaluation pass per epoch.
for epoch in range(1, epochs + 1):
    # Train Mode
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad() # reset accumulated gradients before backpropagation
        output = model(data)
        loss = F.nll_loss(output, target) # https://pytorch.org/docs/stable/nn.html#nll-loss
        loss.backward() # backpropagate to compute gradients
        optimizer.step() # apply the parameter update
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    # Test mode
    model.eval() # switch batch norm / dropout layers to evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad(): # disable the autograd engine to reduce memory use and speed up evaluation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item() # count predictions that match the target
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_1_kaggle_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 8: Kaggle Data Sets**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 8 Material
* **Part 8.1: Introduction to Kaggle** [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb)
* Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb)
* Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb)
* Part 8.4: Bayesian Hyperparameter Optimization for Keras [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb)
* Part 8.5: Current Semester's Kaggle [[Video]](https://www.youtube.com/watch?v=PHQt0aUasRg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb)
# Part 8.1: Introduction to Kaggle
[Kaggle](http://www.kaggle.com) runs competitions in which data scientists compete in order to provide the best model to fit the data. A common project to get started with Kaggle is the [Titanic data set](https://www.kaggle.com/c/titanic-gettingStarted). Most Kaggle competitions end on a specific date. Website organizers have currently scheduled the Titanic competition to end on December 31, 20xx (with the year usually rolling forward). However, they have already extended the deadline several times, and an extension beyond 2014 is also possible. Second, the Titanic data set is considered a tutorial data set. In other words, there is no prize, and your score in the competition does not count towards becoming a Kaggle Master.
### Kaggle Ranks
Kaggle ranks are achieved by earning gold, silver and bronze medals.
* [Kaggle Top Users](https://www.kaggle.com/rankings)
* [Current Top Kaggle User's Profile Page](https://www.kaggle.com/stasg7)
* [Jeff Heaton's (your instructor) Kaggle Profile](https://www.kaggle.com/jeffheaton)
* [Current Kaggle Ranking System](https://www.kaggle.com/progression)
### Typical Kaggle Competition
A typical Kaggle competition will have several components. Consider the Titanic tutorial:
* [Competition Summary Page](https://www.kaggle.com/c/titanic)
* [Data Page](https://www.kaggle.com/c/titanic/data)
* [Evaluation Description Page](https://www.kaggle.com/c/titanic/details/evaluation)
* [Leaderboard](https://www.kaggle.com/c/titanic/leaderboard)
### How Kaggle Competitions are Scored
Kaggle is provided with a data set by the competition sponsor, as seen in Figure 8.SCORE. This data set is divided up as follows:
* **Complete Data Set** - This is the complete data set.
* **Training Data Set** - You are provided both the inputs and the outcomes for the training portion of the data set.
* **Test Data Set** - You are provided the complete test data set; however, you are not given the outcomes. Your submission is your predicted outcomes for this data set.
* **Public Leaderboard** - You are not told what part of the test data set contributes to the public leaderboard. Your public score is calculated based on this part of the data set.
* **Private Leaderboard** - You are not told what part of the test data set contributes to the public leaderboard. Your final score/rank is calculated based on this part. You do not see your private leaderboard score until the end.
**Figure 8.SCORE: How Kaggle Competitions are Scored**

### Preparing a Kaggle Submission
Code need not be submitted to Kaggle. For competitions, you are scored entirely on the accuracy of your submission file. A Kaggle submission file is always a CSV file that contains the **Id** of the row you are predicting and the answer. For the Titanic competition, a submission file looks something like this:
```
PassengerId,Survived
892,0
893,1
894,1
895,0
896,0
897,1
...
```
The above file states the prediction for each of various passengers. You should only predict on ID's that are in the test file. Likewise, you should render a prediction for every row in the test file. Some competitions will have different formats for their answers. For example, a multi-classification will usually have a column for each class and your predictions for each class.
# Select Kaggle Competitions
There have been many interesting competitions on Kaggle, these are some of my favorites.
## Predictive Modeling
* [Otto Group Product Classification Challenge](https://www.kaggle.com/c/otto-group-product-classification-challenge)
* [Galaxy Zoo - The Galaxy Challenge](https://www.kaggle.com/c/galaxy-zoo-the-galaxy-challenge)
* [Practice Fusion Diabetes Classification](https://www.kaggle.com/c/pf2012-diabetes)
* [Predicting a Biological Response](https://www.kaggle.com/c/bioresponse)
## Computer Vision
* [Diabetic Retinopathy Detection](https://www.kaggle.com/c/diabetic-retinopathy-detection)
* [Cats vs Dogs](https://www.kaggle.com/c/dogs-vs-cats)
* [State Farm Distracted Driver Detection](https://www.kaggle.com/c/state-farm-distracted-driver-detection)
## Time Series
* [The Marinexplore and Cornell University Whale Detection Challenge](https://www.kaggle.com/c/whale-detection-challenge)
## Other
* [Helping Santa's Helpers](https://www.kaggle.com/c/helping-santas-helpers)
# Module 8 Assignment
You can find the first assignment here: [assignment 8](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
| github_jupyter |
# Multiple Time Series, Pre-trained Models and Covariates
This notebook serves as a tutorial for:
* Training a single model on multiple time series
* Using a pre-trained model to obtain forecasts for any time series unseen during training
* Training and using a model using covariates
First, some necessary imports:
```
# fix python path if working locally
# (prepends the repo root so the darts package resolves outside an install)
from utils import fix_pythonpath_if_working_locally

fix_pythonpath_if_working_locally()
import pandas as pd
import numpy as np
import torch
import matplotlib.pyplot as plt
from darts import TimeSeries
from darts.utils.timeseries_generation import (
gaussian_timeseries,
linear_timeseries,
sine_timeseries,
)
from darts.models import (
RNNModel,
TCNModel,
TransformerModel,
NBEATSModel,
BlockRNNModel,
)
from darts.metrics import mape, smape
from darts.dataprocessing.transformers import Scaler
from darts.utils.timeseries_generation import datetime_attribute_timeseries
from darts.datasets import AirPassengersDataset, MonthlyMilkDataset
# for reproducibility
torch.manual_seed(1)
np.random.seed(1)
```
### Read Data
Let's start by reading two time series - one containing the monthly number of air passengers, and another containing the monthly milk production per cow. These time series do not have much to do with each other, except that they both have a monthly frequency with a marked yearly periodicity and upward trend, and (completely coincidentally) they contain values of a comparable order of magnitude.
```
# load the two example datasets as darts TimeSeries
series_air = AirPassengersDataset().load()
series_milk = MonthlyMilkDataset().load()
# plot both raw series on the same axes for a first visual comparison
series_air.plot(label="Number of air passengers")
series_milk.plot(label="Pounds of milk produced per cow")
plt.legend()
```
### Preprocessing
Usually neural networks tend to work better on normalised/standardised data. Here we'll use the `Scaler` class to normalise both of our time series between 0 and 1:
```
# one Scaler per series so each can be inverse-transformed independently
scaler_air, scaler_milk = Scaler(), Scaler()
# normalise each series into [0, 1]; neural networks train better on scaled data
series_air_scaled = scaler_air.fit_transform(series_air)
series_milk_scaled = scaler_milk.fit_transform(series_milk)
series_air_scaled.plot(label="air")
series_milk_scaled.plot(label="milk")
plt.legend()
```
### Train / Validation split
Let's keep the last 36 months of both series as validation:
```
# hold out the last 36 months (3 years) of each series for validation
train_air, val_air = series_air_scaled[:-36], series_air_scaled[-36:]
train_milk, val_milk = series_milk_scaled[:-36], series_milk_scaled[-36:]
```
## Global Forecasting Models
Darts contains many forecasting models, but not all of them can be trained on several time series. The models that support training on multiple series are called *global* models. At the time of writing, there are 5 global models:
* BlockRNNModel
* RNNModel
* Temporal Convolutional Networks (TCNs)
* N-Beats
* Transformer model
In the following, we will distinguish two sorts of time series:
* The **target time series** is the time series we are interested to forecast (given its history)
* A **covariate time series** is a time series which may help in the forecasting of the target series, but that we are not interested in forecasting. It's sometimes also called *external data*.
We further differentiate covariates series, depending on whether they can be known in advance or not:
* **Past Covariates** denote time series whose past values are known at prediction time. These are usually things that have to be measured or observed.
* **Future Covariates** denote time series whose future values are already known at prediction time for the span of the forecast horizon. These can for instance represent known future holidays, or weather forecasts.
Some models use only past covariates, others use only future covariates, and some models might use both. We will dive deeper in this topic in some other notebook, but for now it is enough to know this:
* `BlockRNNModel`, `TCNModel`, `NBEATSModel` and `TransformerModel` all use `past_covariates`.
* `RNNModel` uses `future_covariates`.
All of the global models listed above support training on multiple series. In addition, they also all support *multivariate series*. This means that they can seamlessly be used with time series of more than one dimension; the target series can contain one (as is often the case) or several dimensions. A time series with several dimensions is really just a regular time series where the values at each time stamps are vectors instead of scalars.
As an example, the 4 models supporting `past_covariates` follow a "block" architecture. They contain a neural network that takes chunks of time series in input, and outputs chunks of (predicted) future time series values. The input dimensionality is the number of dimensions (components) of the target series, plus the number of components of all the covariates - stacked together. The output dimensionality is simply the number of dimensions of the target series:

The `RNNModel` works differently, in a recurrent fashion (which is also why they support future covariates).
The good news is that as a user, we don't have to worry too much about the different model types and input/output dimensionalities. The dimensionalities are automatically inferred for us by the model based on the training data, and the support for past or future covariates is simply handled by the `past_covariates` or `future_covariates` arguments.
We'll still have to specify two important parameters when building our models:
* `input_chunk_length`: this is the length of the lookback window of the model; so each output will be computed by the model by reading the previous `input_chunk_length` points.
* `output_chunk_length`: this is the length of the outputs (forecasts) produced by the internal model. However, the `predict()` method of the "outer" Darts model (e.g., the one of `NBEATSModel`, `TCNModel`, etc) can be called for a longer time horizon. In these cases, if `predict()` is called for a horizon longer than `output_chunk_length`, the internal model will simply be called repeatedly, feeding on its own previous outputs in an auto-regressive fashion. If `past_covariates` are used it requires these covariates to be known for long enough in advance.
### Example with One Series
Let's look at a first example. We'll build an N-BEATS model that has a lookback window of 24 points (`input_chunk_length=24`) and predicts the next 12 points (`output_chunk_length=12`). We chose these values so it'll make our model produce successive predictions for one year at a time, looking at the past two years.
```
# N-BEATS: look back 24 months, predict 12 months per internal forward pass
model_air = NBEATSModel(
    input_chunk_length=24, output_chunk_length=12, n_epochs=200, random_state=0
)
```
This model can be used like any other Darts forecasting model, being fit on a single time series:
```
# fit on the single (scaled) air-passenger training series
model_air.fit(train_air, verbose=True)
```
And like any other Darts forecasting models, we can then get a forecast by calling `predict()`. Note that below, we are calling `predict()` with a horizon of 36, which is longer than the model internal `output_chunk_length` of 12. That's not a problem here - as explained above, in such a case the internal model will simply be called auto-regressively on its own outputs. In this case, it will be called three times so that the three 12-points outputs make up the final 36-points forecast - but all of this is done transparently behind the scenes.
```
# n=36 > output_chunk_length=12: the internal model is applied
# auto-regressively three times to cover the full horizon
pred = model_air.predict(n=36)
series_air_scaled.plot(label="actual")
pred.plot(label="forecast")
plt.legend()
print("MAPE = {:.2f}%".format(mape(series_air_scaled, pred)))
```
### Training Process (behind the scenes)
So what happened when we called `model_air.fit()` above?
In order to train the internal neural network, Darts first makes a dataset of inputs/outputs examples from the provided time series (in this case: `series_air_scaled`). There are several ways this can be done and Darts contains a few different dataset implementations in the `darts.utils.data` package.
By default, `NBEATSModel` will instantiate a `darts.utils.data.PastCovariatesSequentialDataset`, which simply builds all the consecutive pairs of input/output sub-sequences (of lengths `input_chunk_length` and `output_chunk_length`) existing in the series.
For an example series of length 14, with `input_chunk_length=4` and `output_chunk_length=2`, it looks as follows:

For such a dataset, a series of length `N` would result in a "training set" of `N - input_chunk_length - output_chunk_length + 1` samples. In the toy example above, we have `N=14`, `input_chunk_length=4` and `output_chunk_length=2`, so the number of samples used for training would be K = 9. In this context, a training *epoch* consists of a complete pass (possibly consisting of several mini-batches) over all the samples.
Note that different models are susceptible to use different datasets by default. For instance, `darts.utils.data.HorizonBasedDataset` is inspired by the [N-BEATS paper](https://arxiv.org/abs/1905.10437) and produces samples that are "close" to the end of the series, possibly even ignoring the beginning of the series.
If you have the need to control the way training samples are produced from `TimeSeries` instances, you can implement your own training dataset by inheriting the abstract `darts.utils.data.TrainingDataset` class. Darts datasets are inheriting from torch `Dataset`, which means it's easy to implement lazy versions that do not load all data in memory at once. Once you have your own instance of a dataset, you can directly call the `fit_from_dataset()` method, which is supported by all global forecasting models.
## Training a Model on Multiple Time Series
All this machinery can be seamlessly used with multiple time series. Here's how a sequential dataset with `input_chunk_length=4` and `output_chunk_length=2` looks for two series of lengths N and M:

Note a few things here:
* The different series do not need to have the same length, or even to share the same time stamps.
* In fact, they don't even need to have the same frequency.
* The total number of samples in the training dataset will be the union of all the training samples contained in each series; so a training epoch will now span all samples from all series.
### Training on Both Air Traffic and Milk Series
Let's look at another example where we fit another model instance on our two time series (air passengers and milk production). Since using two series of (roughly) the same length (roughly) doubles the training dataset size, we will use half of the number of epochs:
```
# two series roughly double the training-set size, so use half the epochs (100 vs 200)
model_air_milk = NBEATSModel(
    input_chunk_length=24, output_chunk_length=12, n_epochs=100, random_state=0
)
```
Then, fitting the model on two (or more) series is as simple as giving a list of series (instead of a single series) in argument to the `fit()` function:
```
# training on multiple series: simply pass a list of series to fit()
model_air_milk.fit([train_air, train_milk], verbose=True)
```
### Producing Forecasts After the End of a Series
Now, importantly, when computing the forecasts we have to specify which time series we want to forecast the future for.
We didn't have this constraint earlier. When fitting models on one series only, the model remembers this series internally, and if `predict()` is called without the `series` argument, it returns a forecast for the (unique) training series. This does not work anymore as soon as a model is fit on more than one series - in this case the `series` argument of `predict()` becomes mandatory.
So, let's say we want to predict future of air traffic. In this case we specify `series=train_air` to the `predict()` function in order to say we want to get a forecast for what comes after `train_air`:
```
# after multi-series training, `series` selects which series to forecast beyond
pred = model_air_milk.predict(n=36, series=train_air)
series_air_scaled.plot(label="actual")
pred.plot(label="forecast")
plt.legend()
print("MAPE = {:.2f}%".format(mape(series_air_scaled, pred)))
```
## Wait... does this mean that milk consumption helps to predict air traffic??
Well, in this particular instance with this model, it seems to be the case (at least in terms of MAPE error). This is not so weird if you think about it, though. Air traffic is heavily characterized by the yearly seasonality and upward trend. The milk series exhibits these two traits as well, and in this case it's probably helping the model to capture them.
Note that this points towards the possibility of *pre-training* forecasting models; training models once and for all and later using them to forecast series that are not in the train set.
With our toy model we can really forecast the future values of any other series, even series never seen during training. For the sake of example, let's say we want to forecast the future of some arbitrary sine wave series:
```
# forecast a series never seen during training (a plain monthly sine wave)
any_series = sine_timeseries(length=50, freq="M")
pred = model_air_milk.predict(n=36, series=any_series)
any_series.plot(label='"any series, really"')
pred.plot(label="forecast")
plt.legend()
```
This forecast isn't good (the sine doesn't even have a yearly seasonality), but you get the idea.
Similar to what is supported by the `fit()` function, we can also give a list of series in argument to the `predict()` function, in which case it will return a list of forecast series. For example, we can get the forecasts for both the air traffic and the milk series in one go as follows:
```
# predicting on a list of series returns one forecast per series, in the same order
pred_list = model_air_milk.predict(n=36, series=[train_air, train_milk])
for series, label in zip(pred_list, ["air passengers", "milk production"]):
    series.plot(label=f"forecast {label}")
plt.legend()
```
The two series returned correspond to the forecasts after the end of `train_air` and `train_milk`, respectively.
## Covariates Series
Until now, we have only been playing with models that only use the history of the *target* series to predict its future. However, as explained above, the global Darts models also support the use of *covariates* time series. These are time series of "external data", which we are not necessarily interested in predicting, but which we would still like to feed as input of our models because they can contain valuable information.
#### Building Covariates
Let's see a simple example with our air and milk series, where we'll try to use the year and month-of-the-year as covariates:
```
# build year and month series:
air_year = datetime_attribute_timeseries(series_air_scaled, attribute="year")
air_month = datetime_attribute_timeseries(series_air_scaled, attribute="month")
milk_year = datetime_attribute_timeseries(series_milk_scaled, attribute="year")
milk_month = datetime_attribute_timeseries(series_milk_scaled, attribute="month")
# stack year and month to obtain series of 2 dimensions (year and month):
air_covariates = air_year.stack(air_month)
milk_covariates = milk_year.stack(milk_month)
# scale them between 0 and 1:
scaler_dt_air = Scaler()
air_covariates = scaler_dt_air.fit_transform(air_covariates)
scaler_dt_milk = Scaler()
milk_covariates = scaler_dt_milk.fit_transform(milk_covariates)
# split in train/validation sets:
air_train_covariates, air_val_covariates = air_covariates[:-36], air_covariates[-36:]
milk_train_covariates, milk_val_covariates = (
milk_covariates[:-36],
milk_covariates[-36:],
)
# plot the covariates:
plt.figure()
air_covariates.plot()
plt.title("Air traffic covariates (year and month)")
plt.figure()
milk_covariates.plot()
plt.title("Milk production covariates (year and month)")
```
Good, so for each target series (air and milk), we have built a covariates series having the same time axis and containing the year and the month.
Note that here the covariates series are **multivariate time series**: they contain two dimensions - one dimension for the year and one for the month.
### Training with Covariates
Let's revisit our example again, this time with covariates. We will build a `BlockRNNModel` here:
```
# LSTM-based BlockRNNModel; same 24-in/12-out chunks, longer training (300 epochs)
model_cov = BlockRNNModel(
    model="LSTM",
    input_chunk_length=24,
    output_chunk_length=12,
    n_epochs=300,
    random_state=0,
)
```
Now, to train the model with covariates, it is as simple as providing the covariates (in form of a list matching the target series) as `past_covariates` argument to the `fit()` function. The argument is named `past_covariates` to remind us that the model uses past values of these covariates in order to make a prediction.
```
# BlockRNNModel consumes *past* covariates, so the year/month series go in
# through the past_covariates argument (one covariate series per target series)
model_cov.fit(
    series=[train_air, train_milk],
    past_covariates=[air_train_covariates, milk_train_covariates],
    verbose=True,
)
```
### Forecasting with Covariates
Similarly, getting a forecast is now only a matter of specifying the `past_covariates` argument to the `predict()` function.
```
# air_covariates spans the full time axis, so the model can read the covariate
# values it needs beyond the end of the training series
pred_cov = model_cov.predict(n=36, series=train_air, past_covariates=air_covariates)
series_air_scaled.plot(label="actual")
pred_cov.plot(label="forecast")
plt.legend()
```
Note that here we called `predict()` with a forecast horizon `n` that is larger than the `output_chunk_length` we trained our model with. We were able to do this because even though `BlockRNNModel` uses past covariates, in this case these covariates are also known into the future, so Darts is able to compute the forecasts auto-regressively for `n` time steps in the future.
### Backtesting with Covariates
We can also backtest the model using covariates. Say for instance we are interested in evaluating the running accuracy with a horizon of 12 months, starting at 60% of the air series:
```
# Backtest: repeatedly forecast 12 months ahead, starting at 60% of the series
# (start=0.6), advancing one step at a time, without retraining at each step.
backtest_cov = model_cov.historical_forecasts(
    series_air_scaled,
    past_covariates=air_covariates,
    start=0.6,
    forecast_horizon=12,
    stride=1,
    retrain=False,
    verbose=True,
)
series_air_scaled.plot(label="actual")
backtest_cov.plot(label="forecast")
plt.legend()
print("MAPE (using covariates) = {:.2f}%".format(mape(series_air_scaled, backtest_cov)))
```
### A few more words on past covariates, future covariates and other conditioning
At the moment Darts supports covariates that are themselves time series. These covariates are used as model inputs, but are never themselves subject to prediction. The covariates do not necessarily have to be aligned with the target series (e.g. they do not need to start at the same time). Darts will use the actual time values of the `TimeSeries` time axes in order to jointly slice the targets and covariates correctly, both for training and inference. Of course the covariates still need to have a sufficient span, otherwise Darts will complain.
As explained above, `TCNModel`, `NBEATSModel`, `BlockRNNModel`, `TransformerModel` use past covariates (they will complain if you try using `future_covariates`). If these past covariates happen to also be known into the future, then these models are also able to produce forecasts for `n > output_chunk_length` (as shown above for `BlockRNNModel`) in an auto-regressive way.
By contrast, `RNNModel` uses future covariates (it will complain if you try specifying `past_covariates`). This means that prediction with this model requires the covariates (at least) `n` time steps into the future after prediction time.
Past and future covariates (as well as the way they are consumed by the different models) are an important but non-trivial topic, and we plan to dedicate a future notebook (or article) to explain this further.
At the time of writing, Darts does not support covariates that are not time series - such as for instance class label information or other conditioning variables. One trivial (although likely suboptimal) way to go around this is to build time series filled with constant values encoding the class labels. Supporting more general types of conditioning is a future feature on the Darts development roadmap.
| github_jupyter |
```
import pandas as pd
import geoplot
import geopandas
import matplotlib.pyplot as plt
%matplotlib inline
from shapely.geometry import Polygon
import warnings
warnings.filterwarnings(action="ignore")
#Check geopandas version
geopandas.__version__
#Set default figure size and font size for all plots in this notebook
plt.rcParams["figure.figsize"]=(12,10)
plt.rcParams["font.size"]=12
```
# Getting the canvas ready
```
# load the built-in low-resolution world map shipped with geopandas
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
world.plot()
# same map on an orthographic ("globe") projection
fig=geoplot.polyplot(world,projection=geoplot.crs.Orthographic())
plt.show()
# keep only the European continent
europe=world[world.continent=="Europe"]
europe.plot()
# drop Russia and Iceland to keep the map focused on mainland Europe
europe=europe[(europe.name!="Russia") & (europe.name!="Iceland")]
europe.plot()
```
## Clip French Guiana off of the map of Europe
```
# Create a custom polygon covering mainland Europe (lon -25..40, lat 35..75)
polygon = Polygon([(-25,35), (40,35), (40,75),(-25,75)])
poly_gdf = geopandas.GeoDataFrame([1], geometry=[polygon], crs=world.crs)
fig,ax=plt.subplots()
ax=europe.plot(ax=ax)
# show the clipping rectangle on top of the map (red outline, nearly transparent)
poly_gdf.plot(edgecolor="red",ax=ax, alpha=0.1)
plt.show()
#Clip the map of Europe with the polygon; this removes French Guiana, which is
#part of France's geometry but lies in South America
europe=geopandas.clip(europe, polygon) #Input and feature to be clipped
europe.plot()
```
## Data Preparation
Source: https://ourworldindata.org/grapher/carbon-intensity-electricity
```
# carbon intensity of electricity generation per country/year (Our World in Data)
df=pd.read_csv("carbon-intensity-electricity.csv")
df
# entities present in the emissions dataset
df["Entity"].unique()
len(df["Entity"].unique())
# countries present on the clipped Europe map
europe.name.unique()
len(europe.name.unique())
list(europe.name.unique())
```
### Check if countries in df are present in europe geodataframe or not
```
# Countries that appear in the emissions dataframe but have no matching
# polygon in the europe geodataframe (so they cannot be drawn on the map).
europe_names = set(europe.name.unique())
unmatched = [country for country in df["Entity"].unique() if country not in europe_names]
unmatched
df["Year"].dtypes
#Retain values for the years 2000, 2005, 2010, 2015 and 2020 only
df=df[(df.Year==2000)|(df.Year==2005)|(df.Year==2010) | (df.Year==2015) | (df.Year==2020)]
#Drop Code column (the ISO code is not needed; we join on country name)
df.drop("Code",axis=1, inplace=True)
#Remove unmatched items from df (entities with no polygon on the Europe map)
df=df[(df.Entity!="Cyprus") & (df.Entity!="EU-27") & (df.Entity!="EU27+1") & (df.Entity!="Malta")]
#Make pivot: one row per country, one column per selected year
df=pd.pivot_table(df, index="Entity",columns="Year")
df
#flatten the MultiIndex columns to plain year strings
df.columns=["2000","2005","2010","2015","2020"]
df=df.reset_index()
#rename Entity to name so it matches the geodataframe column used for merging
df.rename({"Entity":"name"},axis=1,inplace=True)
df
# keep only the map polygons for countries we have emissions data for
selected_countries=europe[europe.name.isin(list(df.name))]
selected_countries
# attach the per-year emission columns to the geometries
selected_countries=selected_countries.merge(df,on="name",how="left")
selected_countries
#Range of Variable you see as map color. Here I select the minimum and maximum of all the years selected,
#so every panel shares a common colour scale.
vmin=selected_countries[["2000","2005","2010","2015","2020"]].min().min()
vmax=selected_countries[["2000","2005","2010","2015","2020"]].max().max()
fig,axs=plt.subplots(2,3) #2 rows x 3 columns of map panels
fig.suptitle("Emissions Intensity from electricity generation in Europe 2000-2020", fontweight="bold",fontsize=15)
#Adjust space between rows
plt.subplots_adjust(bottom=0.2, top=0.9, hspace=0.25)
# each panel: grey Europe base map, then the data countries coloured by that
# year's intensity; shared vmin/vmax keeps the colour scale identical across panels
axs[0,0]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[0,0])
selected_countries.plot("2000",cmap="Reds",edgecolor="black",ax=axs[0,0], vmin=vmin, vmax=vmax)
axs[0,0].set_title("2000")
axs[0,0].xaxis.set_visible(False)
axs[0,1]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[0,1])
selected_countries.plot("2005",cmap="Reds",edgecolor="black",ax=axs[0,1], vmin=vmin, vmax=vmax)
axs[0,1].set_title("2005")
axs[0,1].xaxis.set_visible(False)
axs[0,1].yaxis.set_visible(False)
axs[0,2]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[0,2])
selected_countries.plot("2010",cmap="Reds",edgecolor="black",ax=axs[0,2], vmin=vmin, vmax=vmax)
axs[0,2].set_title("2010")
axs[0,2].xaxis.set_visible(False)
axs[0,2].yaxis.set_visible(False)
axs[1,0]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[1,0])
selected_countries.plot("2015",cmap="Reds",edgecolor="black",ax=axs[1,0], vmin=vmin, vmax=vmax)
axs[1,0].set_title("2015")
axs[1,1]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[1,1])
selected_countries.plot("2020",cmap="Reds",edgecolor="black",ax=axs[1,1], vmin=vmin, vmax=vmax)
axs[1,1].set_title("2020")
axs[1,1].yaxis.set_visible(False)
# last panel left empty as a rhetorical "Future?" placeholder
axs[1,2]=europe.plot(color="whitesmoke",edgecolor="black",ax=axs[1,2])
axs[1,2].set_title("Future?")
axs[1,2].yaxis.set_visible(False)
# add a single shared colorbar to the right of the grid
cax = fig.add_axes([0.92, 0.2, 0.03, 0.7]) #[left, bottom, width, height]
sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=vmin, vmax=vmax))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
lgd=fig.colorbar(sm, cax=cax).set_label("gCO$_2$e/kWh", rotation=0,y=1.05, labelpad=-35)
plt.savefig("Emissions Intensity over the past two decades.jpeg",
            dpi=300)
plt.show()
pd.set_option("display.max_columns",None)
df
df
df.set_index("name",inplace=True)
# transpose: years become the index so each country plots as one line
df=df.T
df[["Estonia","Poland","Sweden","United Kingdom","Germany","France"]].plot(marker="o",linestyle="dashed",figsize=(8,6))
plt.title("Carbon Intensity of Electricity Generation Of Selective Countries")
plt.xlabel("Years"); plt.ylabel("gCO$_2$/kWh")
# place the legend outside the axes and keep it inside the saved figure
lgd=plt.legend(bbox_to_anchor=(1,1))
plt.savefig("Selective Countries Carbon Intensity",
            dpi=300,
            bbox_extra_artists=(lgd,),
            bbox_inches="tight")
plt.show()
selected_countries.head()
#Getting the lon and lat here from the geometry data (a point guaranteed to
#lie inside each country's polygon, used later for label placement)
selected_countries['coordinates']=selected_countries['geometry'].apply(lambda x: x.representative_point().coords[:][0])
selected_countries.head()
```
## Analysing carbon intensity in 2020
```
fig, ax=plt.subplots()
# base layer: all of Europe in light grey
ax=europe.plot(color="whitesmoke",
        edgecolor='black',
        ax=ax)
# overlay the countries with 2020 data, coloured by carbon intensity
selected_countries.plot("2020",
                       ax=ax,
                       cmap="Reds",
                       legend=True)
#Add names of countries here. The label text is passed positionally because
#matplotlib 3.3+ renamed annotate()'s first parameter from `s` to `text`,
#so the old `s=` keyword raises a TypeError on current matplotlib; the
#positional form works on every version.
for idx, row in selected_countries.iterrows():
    plt.annotate(row["name"], xy=row['coordinates'],
                 horizontalalignment='center', color='black',fontsize=10, fontweight='light')
plt.title("Carbon Intensity of Electricity Generation in Europe in 2020 (gCO$_2$/kWh)")
plt.savefig("2020 figure", dpi=300)
# alternative: manually drawn colorbar (kept for reference)
#cax = fig.add_axes([0.92, 0.2, 0.03, 0.7])
#sm=plt.cm.ScalarMappable(cmap='Reds',
#                         norm=plt.Normalize(vmin=selected_countries["2020"].min(), vmax=selected_countries["2020"].max()))
#lgd=fig.colorbar(sm,cax=cax).set_label("gCO$_2$e/kWh", rotation=0,y=1.05, labelpad=-35)
```
| github_jupyter |
# Xarray-spatial
### User Guide: Zonal
-----
Xarray-spatial's zonal functions provide an easy way to generate statistics for zones within a raster aggregate. It's set up with a default set of calculations, or you can input any set of custom calculations you'd like to perform.
[Generate terrain](#Generate-Terrain-Data)
[Zonal Statistics](#Zonal-Statistics)
-----------
#### Let's use datashader to render our images...
To get started we'll bring in numpy and some functions from datashader to easily render our images.
```
import numpy as np
import pandas as pd
import datashader as ds
# datashader helpers used to render raster aggregates to images
from datashader.transfer_functions import shade
from datashader.transfer_functions import stack
from datashader.transfer_functions import dynspread
from datashader.transfer_functions import set_background
from datashader.colors import Elevation
import xrspatial
```
## Generate Terrain Data
The rest of the geo-related functions focus on raster data, or data that has been aggregated into the row-column grid of cells of an image raster. To demonstrate using these raster-based functions, we'll first use xarray-spatial's generate_terrain to generate a fake elevation terrain raster. We use datashader's Canvas as a quick base to set up a new raster.
```
from xrspatial import generate_terrain
# raster dimensions in pixels
W = 800
H = 600
# the Canvas supplies the row/column grid (and spatial extent) for the raster
cvs = ds.Canvas(plot_width=W, plot_height=H, x_range=(-20e6, 20e6), y_range=(-20e6, 20e6))
terrain = generate_terrain(canvas=cvs)
shade(terrain, cmap=['black', 'white'], how='linear')
```
We can also apply datashader's Elevation colormap imported above to give a more intuitive terrain image.
```
# re-render the same terrain with datashader's Elevation colormap
shade(terrain, cmap=Elevation, how='linear')
```
## Zonal Statistics
Zonal statistics calculates summary statistics for specific areas or *zones* within an xarray.DataArray aggregate. Specific zones within an aggregate are defined by creating a corresponding aggregate of the same shape and setting the value at each cell to a unique non-zero integer representing a unique zone id.
For example, if we set all the values in the top row of the zones aggregate to 3 and apply this to the original values aggregate, zonal stats will calculate statistics for all the values in the corresponding top row of the values aggregate and return the results as stats for zone \#3.
The output of zonal stats is in the form of a pandas DataFrame, with a row for each zone.
Let's set up an example.
Imagine you go on a six-day hike.
- We can represent the area with a terrain raster.
- In that terrain, we can represent each day's path as a line segment from your start to finish point.
- We can set this up with a pandas dataframe containing the start and finish points, which we then aggregate with Canvas.line.
Let's take a look at these line zones overlayed on the fake terrain.
```
from xrspatial import hillshade
from datashader.colors import Set1
# re-generate the terrain on a smaller, lat/lon-like extent
cvs = ds.Canvas(plot_width=W, plot_height=H, x_range=(-20, 20), y_range=(-20, 20))
terrain = generate_terrain(canvas=cvs)
terrain_shaded = shade(terrain, cmap=Elevation, alpha=128, how='linear')
illuminated = hillshade(terrain)
illuminated_shaded = shade(illuminated, cmap=['gray', 'white'], alpha=255, how='linear')
# one line segment per hiking day; each segment id becomes a zone id
zone_df = pd.DataFrame({
    'x': [-11, -5, 4, 12, 14, 18, 19],
    'y': [-5, 4, 10, 13, 13, 13, 10],
    'trail_segement_id': [11, 12, 13, 14, 15, 16, 17]
})
zones_agg = cvs.line(zone_df, 'x', 'y', ds.sum('trail_segement_id'))
zones_shaded = dynspread(shade(zones_agg, cmap=Set1), max_px=5)
# overlay hillshade, coloured terrain and the zone lines into one image
stack(illuminated_shaded, terrain_shaded, zones_shaded)
```
Now, we can apply zonal stats, after quickly correcting for nan values.
```
from xrspatial import zonal_stats
# zone ids must be integers; replace the NaNs outside the line zones with 0
zones_agg.values = np.nan_to_num(zones_agg.values, copy=False).astype(int)
zonal_stats(zones_agg, terrain)
```
#### Calculate custom stats for each zone:
We can also put in our own set of stats calculations to perform instead of the default ones above.
- We set up a dict with our desired functions and input that as the third argument to `zonal_stats`.
- Below, we try out a range function and min and max functions.
```
# Custom per-zone statistics: the elevation spread plus the two extremes.
def _elevation_change(zone):
    """Difference between the highest and lowest elevation in the zone."""
    return zone.max() - zone.min()

custom_stats = {
    "elevation_change": _elevation_change,
    "elevation_min": np.min,
    "elevation_max": np.max,
}
zonal_stats(zones_agg, terrain, custom_stats)
```
Here the zones are defined by line segments, but they can be any spatial pattern or, more specifically, any region computable as a Datashader aggregate.
| github_jupyter |
### SVM (Support Vector Machine)
In this notebook we are going to implement the Support Vector Machine algorithim from scratch using python and numpy.
### Definition
Support Vector Machine (SVM) is a relatively simple Supervised Machine Learning Algorithm used for classification and/or regression. It is more preferred for classification but is sometimes very useful for regression as well. Basically, SVM finds a hyper-plane that creates a boundary between the types of data. In 2-dimensional space, this hyper-plane is nothing but a line.
In SVM, we plot each data item in the dataset in an N-dimensional space, where N is the number of features/attributes in the data. Next, find the optimal hyperplane to separate the data. So by this, you must have understood that inherently, SVM can only perform binary classification (i.e., choose between two classes). However, there are various techniques to use for multi-class problems.
### Imports for implementation.
```
import numpy as np
```
### The SVM class
In the following code cell we are going then to create an SVM algorithm using numpy.
```
class SVM:
    """Linear soft-margin Support Vector Machine trained with sub-gradient descent.

    The model learns a separating hyperplane ``w . x - b = 0`` by minimising
    the L2-regularised hinge loss. Labels are internally mapped to {-1, +1}.

    Parameters
    ----------
    lr : float, default 0.001
        Learning rate for the gradient updates.
    lambda_param : float, default 0.01
        Regularisation strength (weight of the ||w||^2 penalty).
    n_iters : int, default 1000
        Number of full passes over the training set.
    """

    def __init__(self, lr=0.001, lambda_param=.01, n_iters=1000):
        self.lr = lr
        self.lambda_param = lambda_param
        self.n_iters = n_iters
        self.w = None  # weight vector, set by fit()
        self.b = None  # bias term, set by fit()

    def fit(self, X, y):
        """Fit the SVM on samples X of shape (n_samples, n_features) and labels y.

        Any label <= 0 is treated as the negative class (-1), the rest as +1,
        so both {0, 1} and {-1, +1} label conventions are accepted.
        """
        n_samples, n_features = X.shape
        y_ = np.where(y <= 0, -1, 1)
        self.b = 0
        self.w = np.zeros(n_features)
        for _ in range(self.n_iters):
            for i, x_i in enumerate(X):
                # sample classified correctly and outside the margin:
                # only the regulariser contributes to the sub-gradient
                condition = y_[i] * (np.dot(x_i, self.w) - self.b) >= 1
                if condition:
                    self.w -= self.lr * (2 * self.lambda_param * self.w)
                else:
                    # margin violation: hinge-loss sub-gradient also applies
                    self.w -= self.lr * (
                        2 * self.lambda_param * self.w - np.dot(x_i, y_[i])
                    )
                    self.b -= self.lr * y_[i]

    def predict(self, X):
        """Return the predicted class for each row of X.

        The output of ``np.sign`` is -1.0 or +1.0 (or 0.0 for a point exactly
        on the decision boundary).
        """
        approx = np.dot(X, self.w) - self.b
        return np.sign(approx)

    def evaluate(self, y_true, y_pred):
        """Return the accuracy of y_pred against y_true as a formatted string."""
        return f"Acc: {np.equal(y_true, y_pred).sum()/len(y_true) * 100}%"
```
### Fit, Predict and evaluate
In the following code cells we are going to create a dummy dataset from `sklearn` and call the fit, predict and evaluate function from the SVM classifier
```
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# two well-separated gaussian blobs -> a linearly separable binary problem
X, y = datasets.make_blobs(
    n_samples=150, n_features=2, centers=2, cluster_std=1.05,
    random_state=42
)
# map the {0, 1} blob labels to the {-1, +1} convention used by the SVM
y = np.where(y == 0, -1, 1)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=.2
)
```
### Instance of a classifier
```
clf = SVM()                        # default hyper-parameters: lr=0.001, lambda=0.01, 1000 iterations
clf.fit(X_train, y_train)          # learn the separating hyperplane
predictions = clf.predict(X_test)  # np.sign output: -1.0 / +1.0 (0.0 only if exactly on the boundary)
predictions[:10]                   # notebook display: peek at the first ten predictions
clf.evaluate(predictions, y_test)  # accuracy string, e.g. "Acc: 96.66666666666667%"
```
### Ref
1. [geeks for geeks](https://www.geeksforgeeks.org/introduction-to-support-vector-machines-svm/)
2. [python engineer](https://github.com/python-engineer/MLfromscratch/blob/master/mlfromscratch/svm.py)
```
```
| github_jupyter |
# Exploration of the UC Berkeley Milling Data Set
> In this notebook we introduce a metal machining data set. We’ll explore the data set and see how it is structured. Data exploration is an important first step in any new data science problem. (notebook originally featured at [tvhahn.com](https://www.tvhahn.com/), official GitHub repo: https://github.com/tvhahn/ml-tool-wear)
Let’s pretend that you're at a manufacturing company engaged in metal machining. You're an engineer working at this company, and the CEO has tasked you to develop a system to detect tool wear. Where to start?
UC Berkeley created a milling data set in 2008, which you can download from the [NASA Prognostics Center of Excellence web page](https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/). We’ll use this data set to try out some ideas. In this notebook we’ll briefly cover what milling is before exploring and visualizing the data set.
## Setup Notebook
The notebook can be run with google colab. Alternatively, clone the repo and run on your local machine. You'll need python 3.6+ with the following packages in your local environment:
* Numpy
* SciPy
* Pandas
* Matplotlib
* Seaborn
First, we will load all the necessary packages.
```
import numpy as np
import scipy.io as sio # for reading matlab files
import pathlib
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import zipfile
%matplotlib inline
```
Set the appropriate working folders.
```
# move into the root directory of 'Manufacturing-Data-Science-with-Python'
os.chdir('../')
root_dir = Path.cwd() # set the root directory as a Pathlib path
folder_raw_data = root_dir / 'Data Sets/milling_uc_berkeley/raw/' # raw data folder that holds the .zip .mat files for milling data
folder_processed_data = root_dir / 'Data Sets/milling_uc_berkeley/processed/' # processed data folder
working_folder = root_dir / 'Metal Machining' # working folder
```
We now need to prepare the notebook by downloading the milling data file and other important files. This needs to be done if you are running in google colab. If the repository has been cloned from github, then there is no need.
```
# If the raw data folder does not exist, then you are likely in a
# google colab environment. In that case, we will create the raw data
# and processed data folders and download the appropriate files.
if folder_raw_data.exists() == False:
    pathlib.Path(folder_raw_data).mkdir(parents=True, exist_ok=True)
    os.chdir(folder_raw_data)
    # raw .mat data, zipped (~ the full milling data set)
    !wget 'https://github.com/tvhahn/Manufacturing-Data-Science-with-Python/raw/master/Data%20Sets/milling_uc_berkeley/raw/mill.zip'

if folder_processed_data.exists() == False:
    pathlib.Path(folder_processed_data).mkdir(parents=True, exist_ok=True)
    os.chdir(folder_processed_data)
    # pre-computed labels with tool wear classes
    !wget 'https://raw.githubusercontent.com/tvhahn/Manufacturing-Data-Science-with-Python/master/Data%20Sets/milling_uc_berkeley/processed/labels_with_tool_class.csv'

# if working folder does not exist, then create it
pathlib.Path(working_folder).mkdir(parents=True, exist_ok=True)

if (working_folder / 'data_prep.py').exists() == False:
    os.chdir(working_folder)
    # download important python files into the 'Metal Machining' directory
    !wget 'https://raw.githubusercontent.com/tvhahn/Manufacturing-Data-Science-with-Python/master/Metal%20Machining/data_prep.py'

# extract mill.mat from the zip file
with zipfile.ZipFile(folder_raw_data / 'mill.zip', 'r') as zip_ref:
    zip_ref.extractall(folder_raw_data)

os.chdir(working_folder)
```
# What is milling?
In milling, a rotary cutter removes material as it moves along a work piece. Most often, milling is performed on metal – it's metal machining – and that’s what is happening at the company you’re at.
The picture below demonstrates a face milling procedure. The cutter is progressed forward while rotating. As the cutter rotates, the tool inserts “bite” into the metal and remove it.
<br>
<div style="text-align: center; ">
<figure>
<img src="images/face_milling.svg" alt="milling tool cutting into metal" style="background:none; border:none; box-shadow:none; text-align:center" width="400px"/>
<div style="text-align: center; ">
<br>
<figcaption style="color:grey; font-size:smaller"> A milling tool has several tool inserts on it. As the tool rotates, and is pushed forward, the inserts cut into the metal. (Image modified from <a href="https://commons.wikimedia.org/wiki/File:Fraisage_surfacage.svg#/media/File:Fraisage_surfacage.svg">Wikipedia</a>)</figcaption>
</div>
</figure>
</div>
Over time, the tool inserts wear. Specifically, the flank of the tool wears, as shown below. In the UC Berkeley milling data set the flank wear (VB) is measured as the tool wears.
<div style="text-align: center; ">
<figure>
<img src="images/flank_wear.svg" alt="flank wear on tool insert" style="background:none; border:none; box-shadow:none; text-align:center" width="500px"/>
<!-- <div style="text-align: left; "> -->
<figcaption style="color:grey; font-size:smaller">Flank wear on a tool insert (perspective and front view). <i>VB</i> is the measure of flank wear. (Image from author)</figcaption>
<!-- </div> -->
</figure>
</div>
# Data Exploration
Data exploration is the first important step when tackling any new data science problem. Where to begin? The first step is understanding how the data is structured. How is the data stored? In a database? In an array? Where is the meta-data (things like labels and time-stamps)?
## Data Structure
The UC Berkeley milling data set is contained in a structured MATLAB array. We can load the .mat file using the scipy.io module and the loadmat function.
```
os.chdir(working_folder) # make sure you're in the right folder
# load the data from the matlab file
m = sio.loadmat(folder_raw_data / 'mill.mat',struct_as_record=True)
```
The data is stored as a dictionary. Let's look to see what it is made of.
```
# show some of the info from the matlab file
print('Keys in the matlab dict file: \n', m.keys(), '\n')
```
Only the 'mill' part of the dictionary contains useful information. We'll put that in a new numpy array called 'data'.
```
# check to see what m['mill'] is
print(type(m['mill']))
# store the 'mill' data in a seperate np array
data = m['mill']
```
We now want to see what the 'data' array is made up of.
```
# store the field names in the data np array in a tuple, l
l = data.dtype.names
print('List of the field names:\n',l)
```
## Meta-Data
The documentation with the UC Berkeley milling data set contains additional information, and highlights information about the meta-data. The data set is made of 16 cases of milling tools performing cuts in metal. Six cutting parameters were used in the creation of the data:
* the metal type (either cast iron or steel, labelled as 1 or 2 in the data set, respectively)
* the depth of cut (either 0.75 mm or 1.5 mm)
* the feed rate (either 0.25 mm/rev or 0.5 mm/rev)
Each of the 16 cases is a combination of the cutting parameters (for example, case one has a depth of cut of 1.5 mm, a feed rate of 0.5 mm/rev, and is performed on cast iron).
The cases are made up of individual cuts from when the tool is new to degraded or worn. There are 167 cuts (called 'runs' in the documentation) amongst all 16 cases. Many of the cuts are accompanied by a measure of flank wear (VB). We'll use this later to label the cuts as either healthy, degraded, or worn.
Finally, six signals were collected during each cut: acoustic emission (AE) signals from the spindle and table; vibration from the spindle and table; and AC/DC current from the spindle motor. The signals were collected at 250 Hz and each cut has 9000 sampling points, for a total signal length of 36 seconds.
We will extract the meta-data from the numpy array and store it as a pandas dataframe -- we'll call this dataframe `df_labels` since it contains the label information we'll be interested in. This is how we create the dataframe.
```
# field names of the structured 'mill' array
l = data.dtype.names

# Build the label dataframe column by column: the first seven fields of
# each of the 167 cuts hold scalar meta-data (nested inside [0][0]).
label_columns = {}
for field_idx in range(7):
    column_values = [data[0, cut][field_idx][0][0] for cut in range(167)]
    label_columns[str(field_idx)] = np.array(column_values)
df_labels = pd.DataFrame(label_columns)

# replace the numeric column names with the real field names
df_labels.columns = l[0:7]

# unique id for each of the 167 cuts
df_labels['cut_no'] = list(range(167))

df_labels.head()
```
## Data Visualization
Visualizing a new data set is a great way to get an understanding of what is happening and to spot anything strange. I also love data visualization, so we'll create a beautiful graphic using Seaborn and Matplotlib.
There are only 167 cuts in this data set, which isn't a huge amount. Thus, we can visually inspect each cut to find abnormalities. Fortunately, I've already done that for you.... Below is a highlight.
First, we'll look at a fairly "normal" cut -- cut number 167.
```
# look at cut number 167 (index 166)
cut_no = 166
fig, ax = plt.subplots()
ax.plot(data[0,cut_no]['smcAC'], label='smcAC')
ax.plot(data[0,cut_no]['smcDC'], label='smcDC')
ax.plot(data[0,cut_no]['vib_table'], label='vib_table')
ax.plot(data[0,cut_no]['vib_spindle'], label='vib_spindle')
ax.plot(data[0,cut_no]['AE_table'], label='AE_table')
ax.plot(data[0,cut_no]['AE_spindle'], label='AE_spindle')
plt.legend()
```
However, if you look at all the cuts, you'll find that cuts 18 and 95 (index 17 and 94) are off -- they will need to be discarded when we start building our anomaly detection model.
```
# plot cut no. 18 (index 17). Only plot current signals for simplicity.
cut_no = 17
fig, ax = plt.subplots()
ax.plot(data[0,cut_no]['smcAC'], label='smcAC')
ax.plot(data[0,cut_no]['smcDC'], label='smcDC')
plt.legend()
# plot cut no. 95 (index 94). Only plot current signals for simplicity.
cut_no = 94
fig, ax = plt.subplots()
ax.plot(data[0,cut_no]['smcAC'], label='smcAC')
ax.plot(data[0,cut_no]['smcDC'], label='smcDC')
plt.legend()
```
Cut 106 is also weird...
```
cut_no = 105
fig, ax = plt.subplots()
ax.plot(data[0,cut_no]['smcAC'], label='smcAC')
ax.plot(data[0,cut_no]['smcDC'], label='smcDC')
plt.legend()
```
Finally, we'll create a beautiful plot that nicely visualizes each of the six signals together.
```
def plot_cut(cut_signal, signals_trend, cut_no):
    """Plot the six signals of a single cut, stacked vertically.

    cut_signal    -- one element of the structured 'mill' array (one cut)
    signals_trend -- the six signal field names, in top-to-bottom plot order
    cut_no        -- cut label (currently unused inside the function)
    """
    # colour palette and seaborn styling
    pal = sns.cubehelix_palette(6, rot=-0.25, light=0.7)
    sns.set(style="white", context="notebook")

    fig, axes = plt.subplots(
        6, 1, dpi=150, figsize=(5, 6), sharex=True, constrained_layout=True,
    )

    # "revised" signal names so the axis labels look good on the chart
    signal_names_revised = [
        "AE Spindle",
        "AE Table",
        "Vibe Spindle",
        "Vibe Table",
        "DC Current",
        "AC Current",
    ]

    # 9000 samples at 250 Hz -> x-axis in seconds
    seconds = np.arange(0, 9000) / 250.0

    for idx, ax in enumerate(axes):
        ax.plot(seconds,
                cut_signal[signals_trend[idx]],
                color=pal[idx],
                linewidth=0.5,
                alpha=1)
        ax.set_ylabel(
            signal_names_revised[idx], fontsize=7,
        )
        # hide the subplot frame and y-ticks on every axis (cleaner look)
        for side in ("top", "right", "left", "bottom"):
            ax.spines[side].set_visible(False)
        ax.set_yticks([])
        # only the bottom subplot shows the time axis
        if idx == 5:
            ax.tick_params(axis="x", labelsize=7)
            ax.set_xlabel('Seconds', size=5)
signals_trend = list(l[7:]) # there are 6 types of signals, smcAC to AE_spindle
signals_trend = signals_trend[::-1] # reverse the signal order so that it is matching other charts
# we'll plot signal 146 (index 145)
cut_signal = data[0, 145]
plot_cut(cut_signal, signals_trend, "cut_146")
# plt.savefig('cut_signals.png',format='png') # save the figure
plt.show()
```
| github_jupyter |
# Tutorial 4: Turbine Assembly
Here's what we've done so far in these tutorials:
+ Ran two simple cost models of turbines. In these, we estimated masses of components and cost per kilogram of those components.
+ We learned how OpenMDAO makes *components* when we calculated the Betz limit by modelling an idealized `ActuatorDisc` as a subclass of `ExplicitComponent`.
+ We learned how to group multiple components into groups with the OpenMDAO `Group` class when we modelled the Sellar problem.
We can now turn our attention back to WISDEM and put together a rotor, drivetrain and tower to model a complete wind turbine. We will use the tools we have gained so far in these tutorials to accomplish this.
This is a significant increase in complexity from our previous toy examples. This tutorial doesn't aim to give an exhaustive line-by-line explanation of nearly 400 lines of source code. However, these fundamental building blocks of components, groups and subsystems are used to model systems of significant complexity.
## First, we need to import our dependencies
There are many dependencies we need to import. Of key interest to us here are the various parts of WISDEM that we will assemble into our model.
```python
from wisdem.rotorse.rotor import RotorSE, Init_RotorSE_wRefBlade
from wisdem.rotorse.rotor_geometry_yaml import ReferenceBlade
from wisdem.towerse.tower import TowerSE
from wisdem.commonse import NFREQ
from wisdem.commonse.environment import PowerWind, LogWind
from wisdem.commonse.turbine_constraints import TurbineConstraints
from wisdem.turbine_costsse.turbine_costsse_2015 import Turbine_CostsSE_2015
from wisdem.plant_financese.plant_finance import PlantFinance
from wisdem.drivetrainse.drivese_omdao import DriveSE
```
```
from __future__ import print_function
import numpy as np
from pprint import pprint
from openmdao.api import IndepVarComp, ExplicitComponent, Group, Problem, ScipyOptimizeDriver, SqliteRecorder, NonlinearRunOnce, DirectSolver
try:
from openmdao.api import pyOptSparseDriver
except:
pass
from wisdem.rotorse.rotor import RotorSE, Init_RotorSE_wRefBlade
from wisdem.rotorse.rotor_geometry_yaml import ReferenceBlade
from wisdem.towerse.tower import TowerSE
from wisdem.commonse import NFREQ
from wisdem.commonse.environment import PowerWind, LogWind
from wisdem.commonse.turbine_constraints import TurbineConstraints
from wisdem.turbine_costsse.turbine_costsse_2015 import Turbine_CostsSE_2015
from wisdem.plant_financese.plant_finance import PlantFinance
from wisdem.drivetrainse.drivese_omdao import DriveSE
from wisdem.commonse.mpi_tools import MPI
```
After we import our libraries, we make a subclass of `Group` which will hold our assembly of the turbine.
For our assembly, we will first need some independent variables. For this we will use `IndepVarComp`. Remember that `IndepVarComp` creates *outputs* that send the variables to the *inputs* of other subsystems in the same group. At the bottom of this section, note the `promotes=['*']` which makes these variables available to other subsystems.
```python
myIndeps = IndepVarComp()
myIndeps.add_discrete_output('crane', False)
# Turbine Costs
myIndeps.add_discrete_output('bearing_number', 0)
# Tower and Frame3DD options
myIndeps.add_output('project_lifetime', 0.0, units='yr')
myIndeps.add_output('max_taper_ratio', 0.0)
myIndeps.add_output('min_diameter_thickness_ratio', 0.0)
# Environment
myIndeps.add_output('wind_bottom_height', 0.0, units='m')
myIndeps.add_output('wind_beta', 0.0, units='deg')
myIndeps.add_output('cd_usr', -1.)
# Design standards
myIndeps.add_output('gamma_b', 0.0)
myIndeps.add_output('gamma_n', 0.0)
# RNA
myIndeps.add_discrete_output('rna_weightM', True)
# Column
myIndeps.add_output('morison_mass_coefficient', 2.0)
myIndeps.add_output('material_density', 0.0, units='kg/m**3')
myIndeps.add_output('E', 0.0, units='N/m**2')
myIndeps.add_output('yield_stress', 0.0, units='N/m**2')
# Pontoons
myIndeps.add_output('G', 0.0, units='N/m**2')
# LCOE
myIndeps.add_output('labor_cost_rate', 0.0, units='USD/min')
myIndeps.add_output('material_cost_rate', 0.0, units='USD/kg')
myIndeps.add_output('painting_cost_rate', 0.0, units='USD/m**2')
myIndeps.add_discrete_output('number_of_turbines', 0)
myIndeps.add_output('annual_opex', 0.0, units='USD/kW/yr') # TODO: Replace with output connection
myIndeps.add_output('bos_costs', 0.0, units='USD/kW') # TODO: Replace with output connection
myIndeps.add_output('fixed_charge_rate', 0.0)
myIndeps.add_output('wake_loss_factor', 0.0)
self.add_subsystem('myIndeps', myIndeps, promotes=['*'])
```
Then we add a `RotorSE` group that models our rotor. Note the `promotes=['*']` again. This makes all of its variables available to the other subsystems for implicit connection to other variables of the same name. We attach this rotor as a subsystem in our group. This subsystem is called `rotorse`.
```python
self.add_subsystem('rotorse', RotorSE(RefBlade=RefBlade,
npts_coarse_power_curve=20,
npts_spline_power_curve=200,
regulation_reg_II5=True,
regulation_reg_III=False,
Analysis_Level=Analysis_Level,
FASTpref=self.options['FASTpref'],
topLevelFlag=True), promotes=['*'])
```
After that we add a `DriveSE` group which models our drivetrain. For this group, we do not use `promotes=['*']`; rather, we explicitly promote the variables we want available to the other subsystems. This subsystem is called `drive`.
```python
self.add_subsystem('drive', DriveSE(debug=False,
number_of_main_bearings=1,
topLevelFlag=False),
promotes=['machine_rating', 'overhang',
'hub_mass','bedplate_mass','gearbox_mass','generator_mass','hss_mass','hvac_mass','lss_mass','cover_mass',
'pitch_system_mass','platforms_mass','spinner_mass','transformer_mass','vs_electronics_mass','yaw_mass'])
```
As our last step of the turbine assembly, we add a `TowerSE` group which models our tower. Again, we explicitly list the variables we want to promote from this group. This subsystem is called `tow`.
```python
self.add_subsystem('tow', TowerSE(nLC=1,
nPoints=Nsection_Tow+1,
nFull=5*Nsection_Tow+1,
wind='PowerWind',
topLevelFlag=False),
promotes=['water_density','water_viscosity','wave_beta',
'significant_wave_height','significant_wave_period',
'material_density','E','G','tower_section_height',
'tower_wall_thickness', 'tower_outer_diameter',
'tower_outfitting_factor','tower_buckling_length',
'max_taper','min_d_to_t','rna_mass','rna_cg','rna_I',
'tower_mass','tower_I_base','hub_height',
'foundation_height','monopile','soil_G','soil_nu',
'suctionpile_depth','gamma_f','gamma_m','gamma_b','gamma_n','gamma_fatigue',
'labor_cost_rate','material_cost_rate','painting_cost_rate','z_full','d_full','t_full',
'DC','shear','geom','tower_force_discretization','nM','Mmethod','lump','tol','shift'])
```
The `tow`, `drive` and `rotorse` subsystems comprise our turbine. But we need to place a tip clearance constraint on this assembly to ensure that the tips of the rotor do not collide with the tower. The `TurbineConstraints` group enforces this constraint. We'll add this as a subsystem called `tcons`.
```python
self.add_subsystem('tcons', TurbineConstraints(nFull=5*Nsection_Tow+1), promotes=['*'])
```
Recall our simple cost model for the turbine. Let's add that as the subsystem `tcost`.
```python
self.add_subsystem('tcost', Turbine_CostsSE_2015(verbosity=self.options['VerbosityCosts'], topLevelFlag=False), promotes=['*'])
```
Finally, we want to calculate the LCOE of the turbine.
```python
self.add_subsystem('plantfinancese', PlantFinance(verbosity=self.options['VerbosityCosts']), promotes=['machine_rating', 'lcoe'])
```
OpenMDAO needs to be told how to connect the components that do not use `promotes=['*']`. For example, for `DriveSE` there are a couple of variables that are connected this way:
```python
# Connections to DriveSE
self.connect('diameter', 'drive.rotor_diameter')
self.connect('rated_Q', 'drive.rotor_torque')
```
In these lines, OpenMDAO finds the `diameter` variable promoted into the group's namespace and connects it to the `rotor_diameter` input in the subsystem `drive`. This is addressed as `drive.rotor_diameter`. Similarly, `rated_Q` is connected to `drive.rotor_torque`. Such connections are a good way to "wire" variables with different names together. There are many such connections in our code.
With these subsystems, we can make our group as shown below.
```
# Group to link the openmdao components
class LandBasedTurbine(Group):
    """OpenMDAO Group that assembles a complete land-based wind turbine.

    Combines the rotor (RotorSE), drivetrain (DriveSE) and tower (TowerSE)
    subsystems with tip-clearance constraints, turbine costs and plant
    finance (LCOE), and declares the explicit connections between variables
    that are not implicitly joined via promotion.
    """

    def initialize(self):
        # Options supplied when the group is instantiated.
        self.options.declare('RefBlade')                        # reference blade design object
        self.options.declare('FASTpref', default={})            # OpenFAST preferences (may hold 'Analysis_Level')
        self.options.declare('Nsection_Tow', default = 6)       # number of tower sections
        self.options.declare('VerbosityCosts', default = True)  # verbose output from the cost models

    def setup(self):
        """Add all subsystems and wire their variables together."""
        RefBlade = self.options['RefBlade']
        Nsection_Tow = self.options['Nsection_Tow']
        if 'Analysis_Level' in self.options['FASTpref']:
            Analysis_Level = self.options['FASTpref']['Analysis_Level']
        else:
            Analysis_Level = 0  # default: run CCBlade only

        # Define all input variables from all models
        myIndeps = IndepVarComp()
        myIndeps.add_discrete_output('crane', False)
        # Turbine Costs
        myIndeps.add_discrete_output('bearing_number', 0)
        # Tower and Frame3DD options
        myIndeps.add_output('project_lifetime', 0.0, units='yr')
        myIndeps.add_output('max_taper_ratio', 0.0)
        myIndeps.add_output('min_diameter_thickness_ratio', 0.0)
        # Environment
        myIndeps.add_output('wind_bottom_height', 0.0, units='m')
        myIndeps.add_output('wind_beta', 0.0, units='deg')
        myIndeps.add_output('cd_usr', -1.)
        # Design standards
        myIndeps.add_output('gamma_b', 0.0)
        myIndeps.add_output('gamma_n', 0.0)
        # RNA
        myIndeps.add_discrete_output('rna_weightM', True)
        # Column
        myIndeps.add_output('morison_mass_coefficient', 2.0)
        myIndeps.add_output('material_density', 0.0, units='kg/m**3')
        myIndeps.add_output('E', 0.0, units='N/m**2')
        myIndeps.add_output('yield_stress', 0.0, units='N/m**2')
        # Pontoons
        myIndeps.add_output('G', 0.0, units='N/m**2')
        # LCOE
        myIndeps.add_output('labor_cost_rate', 0.0, units='USD/min')
        myIndeps.add_output('material_cost_rate', 0.0, units='USD/kg')
        myIndeps.add_output('painting_cost_rate', 0.0, units='USD/m**2')
        myIndeps.add_discrete_output('number_of_turbines', 0)
        myIndeps.add_output('annual_opex', 0.0, units='USD/kW/yr')  # TODO: Replace with output connection
        myIndeps.add_output('bos_costs', 0.0, units='USD/kW')  # TODO: Replace with output connection
        myIndeps.add_output('fixed_charge_rate', 0.0)
        myIndeps.add_output('wake_loss_factor', 0.0)
        self.add_subsystem('myIndeps', myIndeps, promotes=['*'])

        # Add components
        # Rotor: promotes everything so its outputs connect implicitly by name.
        self.add_subsystem('rotorse', RotorSE(RefBlade=RefBlade,
                                              npts_coarse_power_curve=20,
                                              npts_spline_power_curve=200,
                                              regulation_reg_II5=True,
                                              regulation_reg_III=False,
                                              Analysis_Level=Analysis_Level,
                                              FASTpref=self.options['FASTpref'],
                                              topLevelFlag=True), promotes=['*'])
        # Drivetrain: only an explicit subset of variables is promoted.
        self.add_subsystem('drive', DriveSE(debug=False,
                                            number_of_main_bearings=1,
                                            topLevelFlag=False),
                           promotes=['machine_rating', 'overhang',
                                     'hub_mass','bedplate_mass','gearbox_mass','generator_mass','hss_mass','hvac_mass','lss_mass','cover_mass',
                                     'pitch_system_mass','platforms_mass','spinner_mass','transformer_mass','vs_electronics_mass','yaw_mass'])
        # Tower and substructure
        self.add_subsystem('tow',TowerSE(nLC=1,
                                         nPoints=Nsection_Tow+1,
                                         nFull=5*Nsection_Tow+1,
                                         wind='PowerWind',
                                         topLevelFlag=False),
                           promotes=['water_density','water_viscosity','wave_beta',
                                     'significant_wave_height','significant_wave_period',
                                     'material_density','E','G','tower_section_height',
                                     'tower_wall_thickness', 'tower_outer_diameter',
                                     'tower_outfitting_factor','tower_buckling_length',
                                     'max_taper','min_d_to_t','rna_mass','rna_cg','rna_I',
                                     'tower_mass','tower_I_base','hub_height',
                                     'foundation_height','monopile','soil_G','soil_nu',
                                     'suctionpile_depth','gamma_f','gamma_m','gamma_b','gamma_n','gamma_fatigue',
                                     'labor_cost_rate','material_cost_rate','painting_cost_rate','z_full','d_full','t_full',
                                     'DC','shear','geom','tower_force_discretization','nM','Mmethod','lump','tol','shift'])
        # Turbine constraints (e.g. blade-tip / tower clearance)
        self.add_subsystem('tcons', TurbineConstraints(nFull=5*Nsection_Tow+1), promotes=['*'])
        # Turbine costs
        self.add_subsystem('tcost', Turbine_CostsSE_2015(verbosity=self.options['VerbosityCosts'], topLevelFlag=False), promotes=['*'])
        # LCOE Calculation
        self.add_subsystem('plantfinancese', PlantFinance(verbosity=self.options['VerbosityCosts']), promotes=['machine_rating','lcoe'])

        # Set up connections (explicit wiring for non-promoted variables)
        # Connections to DriveSE
        self.connect('diameter', 'drive.rotor_diameter')
        self.connect('rated_Q', 'drive.rotor_torque')
        self.connect('rated_Omega', 'drive.rotor_rpm')
        self.connect('Fxyz_total', 'drive.Fxyz')
        self.connect('Mxyz_total', 'drive.Mxyz')
        self.connect('I_all_blades', 'drive.blades_I')
        self.connect('mass_one_blade', 'drive.blade_mass')
        self.connect('chord', 'drive.blade_root_diameter', src_indices=[0])
        self.connect('Rtip', 'drive.blade_length', src_indices=[0])
        self.connect('drivetrainEff', 'drive.drivetrain_efficiency', src_indices=[0])
        self.connect('tower_outer_diameter', 'drive.tower_top_diameter', src_indices=[-1])
        self.connect('material_density', 'tow.tower.rho')
        # Connections to TowerSE
        self.connect('drive.top_F', 'tow.pre.rna_F')
        self.connect('drive.top_M', 'tow.pre.rna_M')
        self.connect('drive.rna_I_TT', ['rna_I','tow.pre.mI'])
        self.connect('drive.rna_cm', ['rna_cg','tow.pre.mrho'])
        self.connect('drive.rna_mass', ['rna_mass','tow.pre.mass'])
        self.connect('rs.gust.V_gust', 'tow.wind.Uref')
        self.connect('wind_reference_height', ['tow.wind.zref','wind.zref'])
        # self.connect('wind_bottom_height', ['tow.wind.z0','tow.wave.z_surface', 'wind.z0']) # offshore
        self.connect('wind_bottom_height', ['tow.wind.z0', 'wind.z0'])
        self.connect('shearExp', ['tow.wind.shearExp'])
        # self.connect('morison_mass_coefficient','tow.cm') # offshore
        self.connect('yield_stress', 'tow.sigma_y')
        self.connect('max_taper_ratio', 'max_taper')
        self.connect('min_diameter_thickness_ratio', 'min_d_to_t')
        self.connect('rho', 'tow.windLoads.rho')
        self.connect('mu', 'tow.windLoads.mu')
        self.connect('wind_beta', 'tow.windLoads.beta')
        # Connections to TurbineConstraints
        self.connect('nBlades', ['blade_number', 'drive.number_of_blades'])
        self.connect('control_maxOmega', 'rotor_omega')
        self.connect('tow.post.structural_frequencies', 'tower_freq')
        # Connections to TurbineCostSE
        self.connect('mass_one_blade', 'blade_mass')
        self.connect('drive.mainBearing.mb_mass', 'main_bearing_mass')
        self.connect('total_blade_cost', 'blade_cost_external')
        # Connections to PlantFinanceSE
        self.connect('AEP', 'plantfinancese.turbine_aep')
        self.connect('turbine_cost_kW', 'plantfinancese.tcc_per_kW')
        self.connect('number_of_turbines', 'plantfinancese.turbine_number')
        self.connect('bos_costs', 'plantfinancese.bos_per_kW')
        self.connect('annual_opex', 'plantfinancese.opex_per_kW')
```
That's a lot of variables to connect! So let's make a function that can do this for us. Let's look at a few key lines of code for what is in this function:
First, we initialize parameters for our rotor model.
```python
prob = Init_RotorSE_wRefBlade(prob, blade, Analysis_Level = Analysis_Level, fst_vt = fst_vt)
```
Then there are a number of lines similar to the following:
```python
prob['material_density'] = 7850.0
```
Remember the `myIndeps.add_output('material_density', 0.0, units='kg/m**3')` line above? This is where we are providing the value for that independent variable.
```
def Init_LandBasedAssembly(prob, blade, Nsection_Tow, Analysis_Level = 0, fst_vt = {}):
    """Populate a LandBasedTurbine problem with baseline input values.

    prob           -- OpenMDAO Problem whose model is LandBasedTurbine (setup() already called)
    blade          -- reference blade object used to initialise RotorSE
    Nsection_Tow   -- number of tower sections (must match the model)
    Analysis_Level -- 0: CCBlade only; higher levels involve FAST
    fst_vt         -- FAST variable tree, passed through to the rotor initialiser

    Returns the same Problem with its inputs set.

    NOTE(review): fst_vt = {} is a mutable default argument; safe only if it
    is never mutated downstream -- confirm against Init_RotorSE_wRefBlade.
    """
    prob = Init_RotorSE_wRefBlade(prob, blade, Analysis_Level = Analysis_Level, fst_vt = fst_vt)

    # Environmental parameters for the tower
    # prob['wind_reference_speed'] = 11.0
    prob['wind_reference_height'] = prob['hub_height']

    # Steel properties for the tower
    prob['material_density'] = 7850.0
    prob['E'] = 200e9
    prob['G'] = 79.3e9
    prob['yield_stress'] = 3.45e8

    # Design constraints
    prob['max_taper_ratio'] = 0.4
    prob['min_diameter_thickness_ratio'] = 120.0

    # Safety factors
    prob['gamma_fatigue'] = 1.755  # (Float): safety factor for fatigue
    prob['gamma_f'] = 1.35  # (Float): safety factor for loads/stresses
    prob['gamma_m'] = 1.3  # (Float): safety factor for materials
    prob['gamma_freq'] = 1.1  # (Float): safety factor for resonant frequencies
    prob['gamma_n'] = 1.0
    prob['gamma_b'] = 1.1

    # Tower
    prob['foundation_height'] = 0.0  #-prob['water_depth']
    # prob['tower_outer_diameter'] = np.linspace(10.0, 3.87, Nsection_Tow+1)
    prob['tower_outer_diameter'] = np.linspace(6.0, 3.87, Nsection_Tow+1)
    # equal-height sections spanning foundation to hub
    prob['tower_section_height'] = (prob['hub_height'] - prob['foundation_height']) / Nsection_Tow * np.ones(Nsection_Tow)
    prob['tower_wall_thickness'] = np.linspace(0.027, 0.019, Nsection_Tow)
    prob['tower_buckling_length'] = 30.0
    prob['tower_outfitting_factor'] = 1.07
    # Frame3DD solver settings
    prob['DC'] = 80.0
    prob['shear'] = True
    prob['geom'] = False
    prob['tower_force_discretization'] = 5.0
    prob['nM'] = 2
    prob['Mmethod'] = 1
    prob['lump'] = 0
    prob['tol'] = 1e-9
    prob['shift'] = 0.0

    # Plant size
    prob['project_lifetime'] = prob['lifetime'] = 20.0
    # sized so total plant capacity is 200 MW
    prob['number_of_turbines'] = 200. * 1.e+006 / prob['machine_rating']
    prob['annual_opex'] = 43.56  # $/kW/yr
    prob['bos_costs'] = 517.0  # $/kW

    # For RNA
    prob['rna_weightM'] = True

    # For turbine costs
    # prob['offshore'] = False
    prob['crane'] = False
    prob['bearing_number'] = 2
    prob['crane_cost'] = 0.0
    prob['labor_cost_rate'] = 3.0
    prob['material_cost_rate'] = 2.0
    prob['painting_cost_rate'] = 28.8

    # Gearbox
    prob['drive.gear_ratio'] = 96.76  # 97:1 as listed in the 5 MW reference document
    prob['drive.shaft_angle'] = prob['tilt']*np.pi / 180.0  # rad
    prob['drive.shaft_ratio'] = 0.10
    prob['drive.planet_numbers'] = [3, 3, 1]
    prob['drive.shrink_disc_mass'] = 333.3 * prob['machine_rating'] / 1e6  # estimated
    prob['drive.carrier_mass'] = 8000.0  # estimated
    prob['drive.flange_length'] = 0.5
    prob['overhang'] = 5.0
    prob['drive.distance_hub2mb'] = 1.912  # length from hub center to main bearing, leave zero if unknown
    prob['drive.gearbox_input_xcm'] = 0.1
    prob['drive.hss_input_length'] = 1.5
    prob['drive.yaw_motors_number'] = 1

    return prob
```
What do we have so far?
1. A group of subsystems that model our rotor, drivetrain and tower
1. A function which sets the values of our independent variables needed by that group.
What remains is to set up our problem to optimize our group as initialized by our function. Again, here are some highlights of the code below:
```python
optFlag = False
```
If this is set to `True`, the model will run repeatedly to optimize the problem, eventually outputting the design variables. But for the purposes of a quick demo, we can set this to `False`. A true optimization would take a long time.
```python
# Reference rotor design
fname_schema = "../wisdem/rotorse/turbine_inputs/IEAontology_schema.yaml"
fname_input = "../wisdem/rotorse/turbine_inputs/nrel5mw_mod_update.yaml"
Analysis_Level = 0 # 0: Run CCBlade; 1: Update FAST model at each iteration but do not run; 2: Run FAST w/ ElastoDyn; 3: (Not implemented) Run FAST w/ BeamDyn
# Initialize blade design
refBlade = ReferenceBlade()
refBlade.verbose = True
refBlade.NINPUT = 8
refBlade.NPTS = 50
refBlade.spar_var = ['Spar_Cap_SS', 'Spar_Cap_PS'] # SS, then PS
refBlade.te_var = 'TE_reinforcement'
refBlade.validate = False
refBlade.fname_schema = fname_schema
blade = refBlade.initialize(fname_input)
# Initialize tower design
Nsection_Tow = 6
```
These lines set up the blade design we will use in our rotor. The `.yaml` files contain the specifications of the blades in our rotor. YAML is a format that makes writing these specifications more convenient than writing them in Python. We also set up the number of sections in the tower here.
```python
prob = Problem()
prob.model = LandBasedTurbine(RefBlade=blade, Nsection_Tow=Nsection_Tow, VerbosityCosts=True)
prob.setup()
prob = Init_LandBasedAssembly(prob, blade, Nsection_Tow)
prob.model.nonlinear_solver = NonlinearRunOnce()
prob.model.linear_solver = DirectSolver()
```
Here, we create a problem and set its model to be the `LandBasedTurbine` we created above. After we do that, we pass our problem to the `Init_LandBasedAssembly()` function, which fills in the baseline input values and returns the initialized problem. After that we set solvers on our `Problem` and we can run it with
```python
prob.run_driver()
```
```
# Reference rotor design (NREL 5 MW, described in YAML ontology files)
fname_schema = "../wisdem/rotorse/turbine_inputs/IEAontology_schema.yaml"
fname_input = "../wisdem/rotorse/turbine_inputs/nrel5mw_mod_update.yaml"
Analysis_Level = 0 # 0: Run CCBlade; 1: Update FAST model at each iteration but do not run; 2: Run FAST w/ ElastoDyn; 3: (Not implemented) Run FAST w/ BeamDyn

# Initialize blade design
refBlade = ReferenceBlade()
refBlade.verbose = True
refBlade.NINPUT = 8   # blade discretisation settings -- see ReferenceBlade docs
refBlade.NPTS = 50
refBlade.spar_var = ['Spar_Cap_SS', 'Spar_Cap_PS']  # SS, then PS
refBlade.te_var = 'TE_reinforcement'
refBlade.validate = False  # skip schema validation of the input YAML
refBlade.fname_schema = fname_schema
blade = refBlade.initialize(fname_input)

# Initialize tower design
Nsection_Tow = 6

# Create a problem for our LandBasedTurbine
prob = Problem()
prob.model=LandBasedTurbine(RefBlade=blade, Nsection_Tow=Nsection_Tow, VerbosityCosts=True)
prob.setup()
# set all baseline input values
prob = Init_LandBasedAssembly(prob, blade, Nsection_Tow)
# single-pass solve (no iteration between subsystems)
prob.model.nonlinear_solver = NonlinearRunOnce()
prob.model.linear_solver = DirectSolver()
prob.model.approx_totals()  # finite-difference approximation of total derivatives
prob.run_driver()
```
Here we see the output from the `TurbineCostSE` and `Plant_finance_SE`. (Don't worry about the `False`)
| github_jupyter |
# Azure Kubernetes Service (AKS) Deep MNIST
In this example we will deploy a tensorflow MNIST model in the Azure Kubernetes Service (AKS).
This tutorial will break down in the following sections:
1) Train a tensorflow model to predict mnist locally
2) Containerise the tensorflow model with our docker utility
3) Send some data to the docker model to test it
4) Install and configure Azure tools to interact with your cluster
5) Use the Azure tools to create and setup AKS cluster with Seldon
6) Push and run docker image through the Azure Container Registry
7) Test our Azure Kubernetes deployment by sending some data
Let's get started! 🚀🔥
## Dependencies:
* Helm v3.0.0+
* A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)
* kubectl v1.14+
* az CLI v2.0.66+
* Python 3.6+
* Python DEV requirements
## 1) Train a tensorflow model to predict mnist locally
We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels
```
# Train a single-layer softmax classifier on MNIST with TensorFlow 1.x
# and save the resulting model for later serving.
from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST to ./MNIST_data/ on first run; labels are one-hot encoded.
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
import tensorflow as tf

if __name__ == '__main__':
    # Model: y = softmax(x.W + b) over flattened 28x28 images (784 pixels, 10 classes).
    x = tf.placeholder(tf.float32, [None, 784], name="x")
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b, name="y")
    # Placeholder for the one-hot ground-truth labels.
    y_ = tf.placeholder(tf.float32, [None, 10])
    # Cross-entropy loss, averaged over the batch.
    # NOTE(review): tf.nn.softmax_cross_entropy_with_logits is numerically more
    # stable than an explicit log(softmax), but this keeps the tutorial's math visible.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    # Fix: tf.initialize_all_variables() was deprecated (and later removed);
    # tf.global_variables_initializer() is the supported replacement.
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    # 1000 steps of SGD with mini-batches of 100 images.
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    # Report accuracy on the held-out test set.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    # Persist the trained graph/weights for the serving container.
    saver = tf.train.Saver()
    saver.save(sess, "model/deep_mnist_model")
```
## 2) Containerise the tensorflow model with our docker utility
First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content:
```
!cat .s2i/environment
```
Now we can build a docker image named "deep-mnist" with the tag 0.1
```
!s2i build . seldonio/seldon-core-s2i-python36:1.8.0-dev deep-mnist:0.1
```
## 3) Send some data to the docker model to test it
We first run the docker image we just created as a container called "mnist_predictor"
```
!docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1
```
Send some random features that conform to the contract
```
# Smoke test for the locally running "mnist_predictor" docker container:
# show one test image, then query the model's REST microservice endpoint.
import matplotlib.pyplot as plt
import numpy as np
# This is the variable that was initialised at the beginning of the file
# (picks the first image of the MNIST test set).
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
# Dot product of [0..9] with the one-hot label recovers the digit value.
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# We now test the REST endpoint expecting the same result
# NOTE(review): assumes the container started above is listening on port 5000.
endpoint = "0.0.0.0:5000"
batch = x
payload_type = "ndarray"
sc = SeldonClient(microservice_endpoint=endpoint)
# We use the microservice, instead of the "predict" function
client_prediction = sc.microservice(
    data=batch,
    method="predict",
    payload_type=payload_type,
    names=["tfidf"])
# Unpack the protobuf response: the inner list holds one probability per digit 0-9.
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
    print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
!docker rm mnist_predictor --force
```
## 4) Install and configure Azure tools
First we install the azure cli - follow specific instructions at https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest
```
!curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
```
### Configure the azure CLI so it can talk to your server
(if you are getting issues, make sure you have the permissions to create clusters)
You must run this through a terminal and follow the instructions:
```
az login
```
Once you are logged in, we can create our cluster. Run the following command, it may take a while so feel free to get a ☕.
```
%%bash
# We'll create a resource group
az group create --name SeldonResourceGroup --location westus
# Now we create the cluster.
# Fix: the original was missing the line-continuation backslash after
# --generate-ssh-keys, so --kubernetes-version ran as a separate (failing) command.
az aks create \
    --resource-group SeldonResourceGroup \
    --name SeldonCluster \
    --node-count 1 \
    --enable-addons monitoring \
    --generate-ssh-keys \
    --kubernetes-version 1.13.5
```
Once it's created we can authenticate our local `kubectl` to make sure we can talk to the azure cluster:
```
!az aks get-credentials --resource-group SeldonResourceGroup --name SeldonCluster
```
And now we can check that this has been successful by making sure that our `kubectl` context is pointing to the cluster:
```
!kubectl config get-contexts
```
## Setup Seldon Core
Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
## Push docker image
In order for the AKS Seldon deployment to access the image we just built, we need to push it to the Azure Container Registry (ACR) - you can check if it's been successfully created in the dashboard https://portal.azure.com/#blade/HubsExtension/BrowseResourceBlade/resourceType/Microsoft.ContainerRegistry%2Fregistries
If you have any issues please follow the official Azure documentation: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli
### First we create a registry
Make sure you keep the `loginServer` value in the output dictionary as we'll use it below.
```
!az acr create --resource-group SeldonResourceGroup --name SeldonContainerRegistry --sku Basic
```
### Make sure your local docker instance has access to the registry
```
!az acr login --name SeldonContainerRegistry
```
### Now prepare docker image
We need to first tag the docker image before we can push it.
NOTE: if you named your registry different make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker tag deep-mnist:0.1 seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
### And push the image
NOTE: if you named your registry different make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker push seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
## Running the Model
We will now run the model. As you can see we have a placeholder `"REPLACE_FOR_IMAGE_AND_TAG"`, which we'll replace to point to our registry.
Let's first have a look at the file we'll be using to trigger the model:
```
!cat deep_mnist.json
```
Now let's trigger seldon to run the model.
### Run the deployment in your cluster
NOTE: In order for this to work you need to make sure that your cluster has the permissions to pull the images. You can do this by:
1) Go into the Azure Container Registry
2) Select the SeldonContainerRegistry you created
3) Click on "Add a role assignment"
4) Select the AcrPull role
5) Select the service principal
6) Find the SeldonCluster
7) Wait until the role has been added
We basically have a yaml file, where we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" for the image you pushed
```
%%bash
# Change accordingly if your registry is called differently
sed 's|REPLACE_FOR_IMAGE_AND_TAG|seldoncontainerregistry.azurecr.io/deep-mnist:0.1|g' deep_mnist.json | kubectl apply -f -
```
And let's check that it's been created.
You should see an image called "deep-mnist-single-model...".
We'll wait until STATUS changes from "ContainerCreating" to "Running"
```
!kubectl get pods
```
## Test the model
Now we can test the model, let's first find out what is the URL that we'll have to use:
```
!kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```
We'll use a random example from our dataset
```
# Display one MNIST test image and its expected label before querying the cluster.
# NOTE(review): `mnist` and `np` come from earlier cells -- this cell is not standalone.
import matplotlib.pyplot as plt
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
# Dot product of [0..9] with the one-hot label recovers the digit value.
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
```
We can now add the URL above to send our request:
```
# Send the sample image to the Seldon deployment through the Ambassador ingress.
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# NOTE(review): replace host with the ingress IP printed by the
# `kubectl get svc ambassador ...` command above -- this value is cluster-specific.
host = "52.160.64.65"
port = "80" # Make sure you use the port above
batch = x
payload_type = "ndarray"
sc = SeldonClient(
    gateway="ambassador",
    ambassador_endpoint=host + ":" + port,
    namespace="default")
# "deep-mnist" must match the deployment name in deep_mnist.json.
client_prediction = sc.predict(
    data=batch,
    deployment_name="deep-mnist",
    names=["text"],
    payload_type=payload_type)
print(client_prediction)
```
### Let's visualise the probability for each label
It seems that it correctly predicted the number 7
```
# Unpack the protobuf response: the inner list holds one probability per digit 0-9.
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
    print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
```
| github_jupyter |
<img src="../static/aeropython_name_mini.png" alt="AeroPython" style="width: 300px;"/>
# Clase 5: SymPy

__SymPy es una biblioteca de Python para matemática simbólica__. Apunta a convertirse en un sistema de álgebra computacional (__CAS__) con todas sus prestaciones manteniendo el código tan simple como sea posible para mantenerlo comprensible y fácilmente extensible. SymPy está __escrito totalmente en Python y no requiere bibliotecas adicionales__. _Este proyecto comenzó en 2005, fue lanzado al público en 2007 y a él han contribuido durante estos años cientos de personas._
_ Otros CAS conocidos son Mathematica y Maple, sin embargo ambos son software privativo y de pago. [Aquí](https://github.com/sympy/sympy/wiki/SymPy-vs.-Maple) puedes encontrar una comparativa de SymPy con Maple. _
Hoy veremos cómo:
* Crear símbolos y expresiones.
* Manipular expresiones (simplificación, expansión...)
* Calcular derivadas e integrales.
* Límites y desarrollos en serie.
* Resolución de ecuaciones.
* Resolución de EDOs.
* Matrices
Sin embargo, SymPy no acaba aquí ni mucho menos...
## Documentación & SymPy Live Shell
```
from IPython.display import HTML
HTML('<iframe src="http://docs.sympy.org/latest/index.html" width="700" height="400"></iframe>')
```
## SymPy Gamma
```
HTML('<iframe src="http://www.sympygamma.com/input/?i=integrate%281+%2F+%281+%2B+x^2%29%29" width="700" height="400"></iframe>')
```
## Creación de símbolos
Lo primero, como siempre, es importar aquello que vayamos a necesitar:
```
# Importación
from sympy import init_session
init_session(use_latex='matplotlib')
```
Lo primero que vemos es que el comando `init_session` ha llevado a cabo algunas acciones por nosotros:
* Gracias a `use_latex=True` obtenemos la salida en $\LaTeX$.
* __Ha creado una serie de variables__ para que podamos ponernos a trabajar en el momento.
<div class="alert warning-info"><strong>Nota:</strong>
En Python, no se declaran las variables, sin embargo, no puedes usar una hasta que no le hayas asignado un valor. Si ahora intentamos crear una variable `a` que sea `a = 2 * b`, veamos qué ocurre:
</div>
```
# Intentamos usar un símbolo que no hemos creado
a = 2 * b
```
Como en `b` no había sido creada, Python no sabe qué es `b`.
Esto mismo nos ocurre con los símbolos de SymPy. __Antes de usar una variable, debo decir que es un símbolo y asignárselo:__
```
# Creamos el símbolo a
a = symbols('a')
a
# Número pi
(a + pi) ** 2
# Unidad imaginaria
a + 2 * I
# Número e
E
# Vemos qué tipo de variable es a
type(a)
```
Ahora ya podría crear `b = 2 * a`:
```
b = 2 * a
b
type(b)
```
¿Qué está ocurriendo? Python detecta que a es una variable de tipo `Symbol` y al multiplicarla por `2` devuelve una variable de Sympy.
Como Python permite que el tipo de una variable cambie, __si ahora le asigno a `a` un valor float deja de ser un símbolo.__
```
a = 2.26492
a
type(a)
```
---
__Las conclusiones son:__
* __Si quiero usar una variable como símbolo debo crearla previamente.__
* Las operaciones con símbolos devuelven símbolos.
* Si una variable que almacenaba un símbolo recibe otra asignación, cambia de tipo.
---
__Las variables de tipo `Symbol` actúan como contenedores en los que no sabemos qué hay (un real, un complejo, una lista...)__. Hay que tener en cuenta que: __una cosa es el nombre de la variable y otra el símbolo con el que se representa__.
```
#creación de símbolos
coef_traccion = symbols('c_T')
coef_traccion
```
Incluso puedo hacer cosas raras como:
```
# Diferencia entre variable y símbolo
a = symbols('b')
a
```
Además, se pueden crear varios símbolos a la vez:
```
x, y, z, t = symbols('x y z t')
```
y símbolos griegos:
```
w = symbols('omega')
W = symbols('Omega')
w, W
```

_Fuente: Documentación oficial de SymPy_
__Por defecto, SymPy entiende que los símbolos son números complejos__. Esto puede producir resultados inesperados ante determinadas operaciones como, por ejemplo, lo logaritmos. __Podemos indicar que la variable es real, entera... en el momento de la creación__:
```
# Creamos símbolos reales
x, y, z, t = symbols('x y z t', real=True)
# Podemos ver las asunciones de un símbolo
x.assumptions0
```
## Expresiones
Comencemos por crear una expresión como: $\cos(x)^2+\sin(x)^2$
```
expr = cos(x)**2 + sin(x)**2
expr
```
### `simplify()`
Podemos pedirle que simplifique la expresión anterior:
```
simplify(expr)
```
En este caso parece estar claro lo que quiere decir más simple, pero como en cualquier _CAS_ el comando `simplify` puede no devolvernos la expresión que nosotros queremos. Cuando esto ocurra necesitaremos usar otras instrucciones.
### `.subs()`
En algunas ocasiones necesitaremos sustituir una variable por otra, por otra expresión o por un valor.
```
expr
# Sustituimos x por y ** 2
expr.subs(x, y**2)
# ¡Pero la expresión no cambia!
expr
# Para que cambie
expr = expr.subs(x, y**2)
expr
```
Cambia el `sin(x)` por `exp(x)`
```
expr.subs(sin(x), exp(x))
```
Particulariza la expresión $sin(x) + 3 x $ en $x = \pi$
```
(sin(x) + 3 * x).subs(x, pi)
```
__Aunque si lo que queremos es obtener el valor numérico lo mejor es `.evalf()`__
```
(sin(x) + 3 * x).subs(x, pi).evalf(25)
#ver pi con 25 decimales
pi.evalf(25)
#el mismo resultado se obtiene ocn la función N()
N(pi,25)
```
# Simplificación
SymPy ofrece numerosas funciones para __simplificar y manipular expresiones__. Entre otras, destacan:
* `expand()`
* `factor()`
* `collect()`
* `apart()`
* `cancel()`
Puedes consultar en la documentación de SymPy lo que hace cada una y algunos ejemplos. __Existen también funciones específicas de simplificación para funciones trigonométricas, potencias y logaritmos.__ Abre [esta documentación](http://docs.sympy.org/latest/tutorial/simplification.html) si lo necesitas.
##### ¡Te toca!
Pasaremos rápidamente por esta parte, para hacer cosas "más interesantes". Te proponemos algunos ejemplos para que te familiarices con el manejo de expresiones:
__Crea las expresiones de la izquierda y averigua qué función te hace obtener la de la derecha:__
expresión 1| expresión 2
:------:|:------:
$\left(x^{3} + 3 y + 2\right)^{2}$ | $x^{6} + 6 x^{3} y + 4 x^{3} + 9 y^{2} + 12 y + 4$
$\frac{\left(3 x^{2} - 2 x + 1\right)}{\left(x - 1\right)^{2}} $ | $3 + \frac{4}{x - 1} + \frac{2}{\left(x - 1\right)^{2}}$
$x^{3} + 9 x^{2} + 27 x + 27$ | $\left(x + 3\right)^{3}$
$\sin(x+2y)$ | $\left(2 \cos^{2}{\left (y \right )} - 1\right) \sin{\left (x \right )} + 2 \sin{\left (y \right )} \cos{\left (x \right )} \cos{\left (y \right )}$
```
#1
expr1 = (x ** 3 + 3 * y + 2) ** 2
expr1
expr1_exp = expr1.expand()
expr1_exp
#2
expr2 = (3 * x ** 2 - 2 * x + 1) / (x - 1) ** 2
expr2
expr2.apart()
#3
expr3 = x ** 3 + 9 * x ** 2 + 27 * x + 27
expr3
expr3.factor()
#4
expr4 = sin(x + 2 * y)
expr4
expand(expr4)
expand_trig(expr4)
expand(expr4, trig=True)
```
# Derivadas e integrales
Puedes derivar una expresion usando el método `.diff()` y la función `dif()`
```
#creamos una expresión
expr = cos(x)
#obtenemos la derivada primera con funcion
diff(expr, x)
#utilizando método
expr.diff(x)
```
__¿derivada tercera?__
```
expr.diff(x, x, x)
expr.diff(x, 3)
```
__¿varias variables?__
```
expr_xy = y ** 3 * sin(x) ** 2 + x ** 2 * cos(y)
expr_xy
diff(expr_xy, x, 2, y, 2)
```
__Queremos que la deje indicada__, usamos `Derivative()`
```
Derivative(expr_xy, x, 2, y)
```
__¿Será capaz SymPy de aplicar la regla de la cadena?__
```
# Creamos una función F
F = Function('F')
F(x)
# Creamos una función G
G = Function('G')
G(x)
```
$$\frac{d}{d x} F{\left (G(x) \right )} $$
```
# Derivamos la función compuesta F(G(x))
F(G(x)).diff(x)
```
En un caso en el que conocemos las funciones:
```
# definimos una f
f = 2 * y * exp(x)
f
# definimos una g(f)
g = f **2 * cos(x) + f
g
#la derivamos
diff(g,x)
```
##### Te toca integrar
__Si te digo que se integra usando el método `.integrate()` o la función `integrate()`__. ¿Te atreves a integrar estas casi inmediatas...?:
$$\int{\cos(x)^2}dx$$
$$\int{\frac{dx}{\sin(x)}}$$
$$\int{\frac{dx}{(x^2+a^2)^2}}$$
```
int1 = cos(x) ** 2
integrate(int1)
int2 = 1 / sin(x)
integrate(int2)
x, a = symbols('x a', real=True)
int3 = 1 / (x**2 + a**2)**2
integrate(int3, x)
```
# Límites
Calculemos este límite sacado del libro _Cálculo: definiciones, teoremas y resultados_, de Juan de Burgos:
$$\lim_{x \to 0} \left(\frac{x}{\tan{\left (x \right )}}\right)^{\frac{1}{x^{2}}}$$
Primero creamos la expresión:
```
x = symbols('x', real=True)
expr = (x / tan(x)) ** (1 / x**2)
expr
```
Obtenemos el límite con la función `limit()` y si queremos dejarlo indicado, podemos usar `Limit()`:
```
limit(expr, x, 0)
```
# Series
Los desarrollos en serie se pueden llevar a cabo con el método `.series()` o la función `series()`
```
#creamos la expresión
expr = exp(x)
expr
#la desarrollamos en serie
series(expr)
```
Se puede especificar el número de términos pasándole un argumento `n=...`. El número que le pasemos será el primer término que desprecie.
```
# Indicando el número de términos
series(expr, n=10)
```
Si nos molesta el $\mathcal{O}(x^{10})$ lo podemos quitar con `removeO()`:
```
series(expr, n=10).removeO()
series(sin(x), n=8, x0=pi/3).removeO().subs(x, x-pi/3)
```
---
## Resolución de ecuaciones
Como se ha mencionado anteriormente las ecuaciones no se pueden crear con el `=`
```
# Create the equation (equations are built with Eq, never with "=").
ecuacion = Eq(x ** 2 - x, 3)
ecuacion
# We can also create it by moving everything to one side.
# Fix: single-argument Eq(expr) was deprecated and removed in modern SymPy
# (it raises a TypeError); the right-hand side must be given explicitly.
Eq(x ** 2 - x - 3, 0)
# Solve it
solve(ecuacion)
```
Pero la gracia es resolver con símbolos, ¿no?
$$a e^{\frac{x}{t}} = C$$
```
# Creamos los símbolos y la ecuación
a, x, t, C = symbols('a, x, t, C', real=True)
ecuacion = Eq(a * exp(x/t), C)
ecuacion
# La resolvemos
solve(ecuacion ,x)
```
Si consultamos la ayuda, vemos que las posibilidades y el número de parámetros son muchos, no vamos a entrar ahora en ellos, pero ¿se ve la potencia?
## Ecuaciones diferenciales
Tratemos de resolver, por ejemplo:
$$y{\left (x \right )} + \frac{d}{d x} y{\left (x \right )} + \frac{d^{2}}{d x^{2}} y{\left (x \right )} = \cos{\left (x \right )}$$
```
x = symbols('x')
# The unknown of the ODE is the function y(x).
y = Function('y')
ecuacion_dif = Eq(y(x).diff(x,2) + y(x).diff(x) + y(x), cos(x))
ecuacion_dif
# Solve the ODE.
# Fix: the original passed f(x), but `f` here is an expression defined earlier
# (2*y*exp(x)), not a Function, so calling it raises TypeError. The unknown is y(x).
dsolve(ecuacion_dif, y(x))
```
# Matrices
```
#creamos una matriz llena de símbolos
a, b, c, d = symbols('a b c d')
A = Matrix([
[a, b],
[c, d]
])
A
#sacamos autovalores
A.eigenvals()
#inversa
A.inv()
#elevamos al cuadrado la matriz
A ** 2
```
---
_ Esto ha sido un rápido recorrido por algunas de las posibilidades que ofrece SymPy . El cálculo simbólico es un terreno díficil y este joven paquete avanza a pasos agigantados gracias a un grupo de desarrolladores siempre dispuestos a mejorar y escuchar sugerencias. Sus posibilidades no acaban aquí. En otros cursos puedes encontrar también una clase sobre el módulo `mechanics`, pero además cuenta con herramientas para geometría, mecánica cuántica, teoría de números, combinatoria... Puedes echar un ojo [aquí](http://docs.sympy.org/latest/modules/index.html). _
---
Clase en vídeo, parte del [Curso de Python para científicos e ingenieros](http://cacheme.org/curso-online-python-cientifico-ingenieros/) grabado en la Escuela Politécnica Superior de la Universidad de Alicante.
```
from IPython.display import YouTubeVideo
YouTubeVideo("OGQRcYVys1Q", width=560, height=315, list="PLGBbVX_WvN7as_DnOGcpkSsUyXB1G_wqb")
```
---
Si te ha gustado esta clase:
<a href="https://twitter.com/share" class="twitter-share-button" data-url="https://github.com/AeroPython/Curso-AeroPython-UC3M/" data-text="Aprendiendo Python con" data-via="AeroPython" data-size="large" data-hashtags="AeroPython">Tweet</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
---
#### <h4 align="right">¡Síguenos en Twitter!
###### <a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>.
##### <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/juanluiscanor" data-format="inline" data-related="false"></script> <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/alejandrosaezm" data-format="inline" data-related="false"></script>
---
_Las siguientes celdas contienen configuración del Notebook_
_Para visualizar y utlizar los enlaces a Twitter el notebook debe ejecutarse como [seguro](http://ipython.org/ipython-doc/dev/notebook/security.html)_
File > Trusted Notebook
```
%%html
<a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
# Esta celda da el estilo al notebook
from IPython.core.display import HTML
css_file = '../static/styles/style.css'
HTML(open(css_file, "r").read())
```
| github_jupyter |
# 1. Introduction to Python syntax
This notebook demonstrates some basic syntax rules of Python programming language. It's a scratchpad to experiment in, so go nuts! You _cannot_ break things.
- [Using Jupyter notebooks](#Using-Jupyter-notebooks)
- [Basic data types](#Basic-data-types)
- [Strings](#Strings)
- [Numbers and math](#Numbers-and-math)
- [Booleans](#Booleans)
- [Variable assignment](#Variable-assignment)
- [String methods](#String-methods)
- [Comments](#Comments)
- [The print() function](#The-print()-function)
- [Collections of data](#Collections-of-data)
- [Lists](#Lists)
- [Dictionaries](#Dictionaries)
- [`for` loops](#for-loops)
- [`if` statements](#if-statements)
- [Writing custom functions](#functions)
### Using Jupyter notebooks
There are many ways to write and run Python code on your computer. One way -- the method we're using today -- is to use [Jupyter notebooks](https://jupyter.org/), which run in your browser and allow you to intersperse documentation with your code. They're handy for bundling your code with a human-readable explanation of what's happening at each step. Check out some examples from the [L.A. Times](https://github.com/datadesk/notebooks) and [BuzzFeed News](https://github.com/BuzzFeedNews/everything#data-and-analyses).
**To add a new cell to your notebook**: Click the + button in the menu.
**To run a cell of code**: Select the cell and click the "Run" button in the menu, or you can press Shift+Enter.
**One common gotcha**: The notebook doesn't "know" about code you've written until you've _run_ the cell containing it. For example, if you define a variable called `my_name` in one cell, and later, when you try to access that variable in another cell but get an error that says `NameError: name 'my_name' is not defined`, the most likely solution is to run (or re-run) the cell in which you defined `my_name`.
(If you're interested in learning more about markdown, the markup language used to create text cells, [this is a good overview](https://daringfireball.net/projects/markdown/).)
### Basic data types
Just like Excel and other data processing software, Python recognizes a variety of data types, including three we'll focus on here:
- Strings (text)
- Numbers (integers, numbers with decimals and more)
- Booleans (`True` and `False`).
You can use the built-in [`type()`](https://docs.python.org/3/library/functions.html#type) function to check the data type of a value.
#### Strings
A string is a group of characters -- letters, numbers, whatever -- enclosed within single or double quotes (doesn't matter as long as they match). The code in these notebooks uses single quotes. (The Python style guide doesn't recommend one over the other: ["Pick a rule and stick to it."](https://www.python.org/dev/peps/pep-0008/#string-quotes))
If your string _contains_ apostrophes or quotes, you have two options: _Escape_ the offending character with a forward slash `\`:
```python
'Isn\'t it nice here?'
```
... or change the surrounding punctuation:
```python
"Isn't it nice here?"
```
The style guide recommends the latter over the former.
When you call the `type()` function on a string, Python will return `str`.
Calling the [`str()` function](https://docs.python.org/3/library/stdtypes.html#str) on a value will return the string version of that value (see examples below).
```
'Investigative Reporters & Editors'
type('hello!')
45
type(45)
str(45)
type(str(45))
```
If you "add" strings together with a plus sign `+`, it will concatenate them:
```
'IRE' + ' & ' + 'NICAR'
```
### ✍️ Try it yourself
Use the code blocks below to experiment with strings: creating them, checking the type and concatenating values.
#### Numbers and math
Python recognizes a variety of numeric data types. Two of the most common are integers (whole numbers) and floats (numbers with decimals).
Calling `int()` on a piece of numeric data (even if it's being stored as a string) will attempt to coerce it to an integer; calling `float()` will try to convert it to a float.
```
type(12)
type(12.4)
int(35.6)
int('45')
float(46)
float('45')
```
You can do [basic math](https://www.digitalocean.com/community/tutorials/how-to-do-math-in-python-3-with-operators) in Python. You can also do [more advanced math](https://docs.python.org/3/library/math.html).
```
4+2
10-9
5*10
1000/10
# ** raises a number to the power of another number
5**2
```
### ✍️ Try it yourself
Use the code blocks below to experiment with numbers: creating them, checking the type, doing basic math. See if you can find other Python functions for working with numbers.
#### Booleans
Just like in Excel, which has `TRUE` and `FALSE` data types, Python has boolean data types. They are `True` and `False` -- note that only the first letter is capitalized, and they are not sandwiched between quotes.
Boolean values are typically returned when you're evaluating some sort of conditional statement -- comparing values, checking to see if a string is inside another string or if a value is in a list, etc.
[Python's comparison operators](https://docs.python.org/3/reference/expressions.html#comparisons) include:
- `>` greater than
- `<` less than
- `>=` greater than or equal to
- `<=` less than or equal to
- `==` equal to
- `!=` not equal to
```
4 > 6
10 == 10
'IRE' == 'ire'
type(True)
```
### ✍️ Try it yourself
Use the code blocks below to experiment with booleans: creating them, checking the type, evaluating the result of a conditional statement, etc.
### Variable assignment
The `=` sign assigns a value to a variable name that you choose. Later, you can retrieve that value by referencing its variable name. Variable names can be pretty much anything you want ([as long as you follow some basic rules](https://thehelloworldprogram.com/python/python-variable-assignment-statements-rules-conventions-naming/)).
This can be a tricky concept at first! For more detail, [here's a pretty good explainer from Digital Ocean](https://www.digitalocean.com/community/tutorials/how-to-use-variables-in-python-3).
```
my_name = 'Frank'
my_name
```
You can also _reassign_ a different value to a variable name, though it's usually better practice to create a new variable.
```
my_name = 'Susan'
my_name
```
A common thing to do is to "save" the results of an expression by assigning the result to a variable.
```
my_fav_number = 10 + 3
my_fav_number
```
It's also common to refer to previously defined variables in an expression:
```
nfl_teams = 32
mlb_teams = 30
nba_teams = 30
nhl_teams = 31
number_of_pro_sports_teams = nfl_teams + mlb_teams + nba_teams + nhl_teams
number_of_pro_sports_teams
```
### ✍️ Try it yourself
Use the code blocks below to experiment with variable assignment.
### String methods
Let's go back to strings for a second. String objects have a number of useful [methods](https://docs.python.org/3/library/stdtypes.html#string-methods) -- let's use an example string to demonstrate a few common ones.
```
my_cool_string = ' Hello, friends!'
```
`upper()` converts the string to uppercase (see also `lower()`, `title()` and `casefold()`):
```
my_cool_string.upper()
```
`replace()` will replace a piece of text with other text that you specify:
```
my_cool_string.replace('friends', 'enemies')
```
[`split()`](https://docs.python.org/3/library/stdtypes.html#str.split) will split the string into a [_list_](#Lists) (more on these in a second) on a given delimiter (if you don't specify a delimiter, it'll default to splitting on a space, and -- usefully! -- it will ignore repeated spaces):
```
my_cool_string.split()
my_cool_string.split(',')
my_cool_string.split('friends')
```
`strip()` removes whitespace from either side of your string (but not internal whitespace):
```
my_cool_string.strip()
```
You can use a cool thing called "method chaining" to combine methods -- just tack 'em onto the end. Let's say we wanted to strip whitespace from our string _and_ make it uppercase:
```
my_cool_string.strip().upper()
```
Notice, however, that our original string is unchanged:
```
my_cool_string
```
Why? Because we haven't assigned the results of anything we've done to a variable. A common thing to do, especially when you're cleaning data, would be to assign the results to a new variable:
```
my_cool_string_clean = my_cool_string.strip().upper()
my_cool_string_clean
```
### ✍️ Try it yourself
Use the code blocks below to experiment with string functions.
### Comments
A line with a comment -- a note that you don't want Python to interpret -- starts with a `#` sign. These are notes to collaborators and to your future self about what's happening at this point in your script, and why.
Typically you'd put this on the line right above the line of code you're commenting on:
```
avg_settlement = 40827348.34328237
# coercing this to an int because we don't need any decimal precision
int(avg_settlement)
```
Multi-line comments are sandwiched between triple quotes (or triple apostrophes):
`'''
this
is a long
comment
'''`
or
`"""
this
is a long
comment
"""`
### The `print()` function
So far, we've just been running the notebook cells to get the last value returned by the code we write. Using the [`print()`](https://docs.python.org/3/library/functions.html#print) function is a way to print specific things in your script to the screen. This function is handy for debugging, especially if you're writing code outside of a notebook environment.
To print multiple things on the same line, separate them with a comma.
```
print('Hello!')
print(my_name)
print('Hello,', my_name)
```
## Collections of data
Now we're going to talk about two ways you can use Python to group data into a collection: lists and dictionaries.
### Lists
A _list_ is a comma-separated list of items inside square brackets: `[]`.
Here's a list of ingredients, each one a string, that together makes up a salsa recipe.
```
salsa_ingredients = ['tomato', 'onion', 'jalapeño', 'lime', 'cilantro']
```
To get an item out of a list, you'd refer to its numerical position in the list -- its _index_ (1, 2, 3, etc.) -- inside square brackets immediately following your reference to that list. In Python, as in many other programming languages, counting starts at 0. That means the first item in a list is item `0`.
```
salsa_ingredients[0]
salsa_ingredients[1]
```
You can use _negative indexing_ to grab things from the right-hand side of the list -- and in fact, `[-1]` is a common idiom for getting "the last item in a list" when it's not clear how many items are in your list.
```
salsa_ingredients[-1]
```
If you wanted to get a slice of multiple items out of your list, you'd use colons (just like in Excel, kind of!).
If you wanted to get the first three items, you'd do this:
```
salsa_ingredients[0:3]
```
You could also have left off the initial 0 -- when you leave out the first number, Python defaults to "the first item in the list." In the same way, if you leave off the last number, Python defaults to "the last item in the list."
```
salsa_ingredients[:3]
```
Note, too, that this slice is giving us items 0, 1 and 2. The `3` in our slice is the first item we _don't_ want. That can be kind of confusing at first. Let's try a few more:
```
# everything in the list except the first item
salsa_ingredients[1:]
# the second, third and fourth items
salsa_ingredients[1:4]
# the last two items
salsa_ingredients[-2:]
```
To see how many items are in a list, use the `len()` function:
```
len(salsa_ingredients)
```
To add an item to a list, use the [`append()`](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists) method:
```
salsa_ingredients
salsa_ingredients.append('mayonnaise')
salsa_ingredients
```
Haha _gross_. To remove an item from a list, use the `pop()` method. If you don't specify the index number of the item you want to pop out, it will default to "the last item."
```
salsa_ingredients.pop()
salsa_ingredients
```
You can use the [`in` and `not in`](https://docs.python.org/3/reference/expressions.html#membership-test-operations) expressions to test membership in a list (will return a boolean):
```
'lime' in salsa_ingredients
'cilantro' not in salsa_ingredients
```
### ✍️ Try it yourself
Use the code blocks below to experiment with lists.
### Dictionaries
A _dictionary_ is a comma-separated list of key/value pairs inside curly brackets: `{}`. Let's make an entire salsa recipe:
```
salsa = {
'ingredients': salsa_ingredients,
'instructions': 'Chop up all the ingredients and cook them for awhile.',
'oz_made': 12
}
```
To retrieve a value from a dictionary, you'd refer to the name of its key inside square brackets `[]` immediately after your reference to the dictionary:
```
salsa['oz_made']
salsa['ingredients']
```
To add a new key/value pair to a dictionary, assign a new key to the dictionary inside square brackets and set the value of that key with `=`:
```
salsa['tastes_great'] = True
salsa
```
To delete a key/value pair out of a dictionary, use the `del` command and reference the key:
```
del salsa['tastes_great']
salsa
```
### ✍️ Try it yourself
Use the code blocks below to experiment with dictionaries.
### Indentation
Whitespace matters in Python. Sometimes you'll need to indent bits of code to make things work. This can be confusing! `IndentationError`s are common even for experienced programmers. (FWIW, Jupyter will try to be helpful and insert the correct amount of "significant whitespace" for you.)
You can use tabs or spaces, just don't mix them. [The Python style guide](https://www.python.org/dev/peps/pep-0008/) recommends indenting your code in groups of four spaces, so that's what we'll use.
### `for` loops
You would use a `for` loop to iterate over a collection of things. The statement begins with the keyword `for` (lowercase), then a temporary `variable_name` of your choice to represent each item as you loop through the collection, then the Python keyword `in`, then the collection you're looping over (or its variable name), then a colon, then the indented block of code with instructions about what to do with each item in the collection.
Let's say we have a list of numbers that we assign to the variable `list_of_numbers`.
```
list_of_numbers = [1, 2, 3, 4, 5, 6]
```
We could loop over the list and print out each number:
```
for number in list_of_numbers:
print(number)
```
We could print out each number _times 6_:
```
for number in list_of_numbers:
print(number*6)
```
... whatever you need to do in your loop. Note that the variable name `number` in our loop is totally arbitrary. This also would work:
```
for banana in list_of_numbers:
print(banana)
```
It can be hard, at first, to figure out what's a "Python word" and what's a variable name that you get to define. This comes with practice.
Strings are iterable, too. Let's loop over the letters in a sentence:
```
sentence = 'Hello, IRE & NICAR!'
for letter in sentence:
print(letter)
```
To this point: Strings are iterable, like lists, so you can use the same kinds of methods:
```
# get the first five characters
sentence[:5]
# get the length of the sentence
len(sentence)
'Hello' in sentence
```
You can iterate over dictionaries, too -- just remember that in older versions of Python (before 3.7), dictionaries _don't keep track of the order that items were added to them_; in modern Python, dictionaries preserve insertion order.
When you're looping over a dictionary, the variable name in your `for` loop will refer to the keys. Let's loop over our `salsa` dictionary from up above to see what I mean.
```
for key in salsa:
print(key)
```
To get the _value_ of a dictionary item in a for loop, you'd need to use the key to retrieve it from the dictionary:
```
for key in salsa:
print(salsa[key])
```
### ✍️ Try it yourself
Use the code blocks below to experiment with for loops.
### `if` statements
Just like in Excel, you can use the "if" keyword to handle conditional logic.
These statements begin with the keyword `if` (lowercase), then the condition to evaluate, then a colon, then a new line with a block of indented code to execute if the condition resolves to `True`.
```
if 4 < 6:
print('4 is less than 6')
```
You can also add an `else` statement (and a colon) with an indented block of code you want to run if the condition resolves to `False`.
```
if 4 > 6:
print('4 is greater than 6?!')
else:
print('4 is not greater than 6.')
```
If you need to, you can add multiple conditions with `elif`.
```
HOME_SCORE = 6
AWAY_SCORE = 8
if HOME_SCORE > AWAY_SCORE:
print('we won!')
elif HOME_SCORE == AWAY_SCORE:
print('we tied!')
else:
print('we lost!')
```
### ✍️ Try it yourself
Use the code blocks below to experiment with _if_ statements.
| github_jupyter |
# Alternative methods for chemical equilibrium
The methods previously examined for determining the equilibrium composition rely on knowing the chemical reaction(s) occurring, and can involve highly nonlinear equations.
Fortunately, we have methods that do not require knowing what reaction(s) are occurring.
We will compare two such solution methods:
1. {ref}`gibbs-minimization`
2. {ref}`lagrange-method`
This modules introduces these methods using the same example as {doc}`equilibrium-constant`: consider a mixture with 1 kilomole of carbon monoxide (CO) that reacts with 0.5 kmol of oxygen (O$_2$) to form a mixture of CO, CO$_2$, and O$_2$, with the equilibrium conditions of 2500 K and (a) 1 atm (b) 10 atm. Find the equilibrium composition in terms of the mole fraction. Assume the mixture behaves as an ideal gas.
```
import numpy as np
import cantera as ct
from scipy.optimize import root, minimize
from pint import UnitRegistry
ureg = UnitRegistry()
Q_ = ureg.Quantity
# for convenience:
def to_si(quant):
    """Strip units: convert a Pint Quantity to base SI units and return its magnitude."""
    in_base_units = quant.to_base_units()
    return in_base_units.magnitude
```
(gibbs-minimization)=
## Direct minimization of Gibbs free energy
One method to finding the equilibrium composition is to directly minimize the Gibbs free energy of the mixture.
The total Gibbs free energy of the mixture is
$$
G = \sum_{i=1}^C n_i \mu_i \;,
$$
where $C$ is the number of components (i.e., chemical species), $n_i$ is the number of moles of component $i$, and $\mu_i$ is the chemical potential of component $i$.
For an ideal gas in a mixture, the chemical potential can be calculated using
$$
\mu_i = \mu_i^{\circ} + R_{\text{univ}} T \ln \left( \frac{y_i P}{P^{\circ}} \right) \;,
$$
where $R_{\text{univ}}$ is the universal gas constant, $P$ is the mixture pressure,
$P^{\circ}$ is the (standard-state) reference pressure (usually 1 atm or 100 kPa),
and $\mu_i^{\circ}$ is the chemical potential of pure substance $i$ at temperature $T$ and reference pressure $P^{\circ}$, which is the same as the standard-state molar specific Gibbs free energy $\overline{g}_i^{\circ}$:
$$
\mu_i^{\circ} = \overline{g}_i^{\circ} = \overline{h}_i^{\circ} - T \overline{s}_i^{\circ} \;.
$$
This method works by treating this as an optimization problem, where the objective is to minimize $G$, which is a function of the composition $n_i$.
**Constraints:** However, this problem is constrained because the amount of each element must be balanced:
$$
E_j = E_{0, j}
$$
where $E_j = \sum_{i=1}^C n_i e_{i,j}$ is the number of moles of each element $j$ ($E$ is the total number of elements), $E_{0, j} = \sum_{i=1}^C n_{0,i} e_{i,j}$ is the initial number of moles of each element, $n_{0,i}$ is the initial number of moles of each component $i$, and $e_{i,j}$ is the number of moles of element $j$ in component $i$ (defined by the chemical formula).
In addition, the number of moles of each component must remain non-negative:
$$
n_i \geq 0
$$
This is thus a **constrained optimization** problem—we can solve these for simpler problems, but they can become computationally expensive for a larger number of unknowns. For now, we can use the [`SLSQP`](https://docs.scipy.org/doc/scipy/reference/optimize.minimize-slsqp.html) optimization method provided by the SciPy [`minimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) function.
The formal statement of our problem is:
$$
\min_{n_0, n_1, n_2} \left( n_0 \mu_0 (n_0, n_1, n_2) + n_1 \mu_1 (n_0, n_1, n_2) + n_2 \mu_2 (n_0, n_1, n_2) \right) \\
\text{subject to:} \quad \sum_{i} n_i e_{i,0} - \sum_{i} n_{0,i} e_{i,0} = 0\\
\phantom{subject to:} \quad \sum_{i} n_i e_{i,1} - \sum_{i} n_{0,i} e_{i,1} = 0\\
\phantom{subject to:} \quad n_0 \geq 0 \\
\phantom{subject to:} \quad n_1 \geq 0 \\
\phantom{subject to:} \quad n_2 \geq 0
$$
We will need to define three functions:
1. Evaluate the Gibbs free energy of the mixture,
2. Evaluate the equality constraints on elemental balances
3. Evaluate the inequality constraints on numbers of moles
First, let's input the known information:
```
# Known information
components = ['CO', 'O2', 'CO2']
moles_initial = np.array([1.0, 0.5, 0.0])
temperature = Q_(2500, 'K')
pressures = [1, 10] * Q_('atm')
# elemental composition of species
elemental_comp = np.array([
[1, 0, 1], # carbon
[1, 2, 2], # oxygen
])
# initial molar amounts of each element
initial_elements = np.dot(elemental_comp, moles_initial)
def calc_total_gibbs(moles, temperature, pressure, components, gas):
    """Total Gibbs free energy of an ideal-gas mixture, scaled down by 1e6.

    moles: array of molar amounts (kmol) matching ``components``.
    temperature, pressure: Pint quantities giving the mixture state.
    components: species names resolvable by the Cantera ``gas`` object.
    gas: Cantera Solution used for standard-state property evaluation.

    Returns the mixture Gibbs energy divided by 1e6 (MJ rather than J);
    the scaling helps the SLSQP optimizer converge.
    """
    n = Q_(moles, 'kmol')
    y = n / np.sum(n)

    # Standard-state molar Gibbs energy of each pure species at T and 1 atm.
    g_standard = np.zeros(len(components))
    for i, species in enumerate(components):
        gas.TPX = to_si(temperature), to_si(Q_(1, 'atm')), f'{species}:1.0'
        g_standard[i] = gas.gibbs_mole
    g_standard *= Q_('J/kmol')

    # mu_i = g_i° + R*T*ln(y_i * P / P°), with reference pressure P° = 1 atm.
    gas_constant = Q_(ct.gas_constant, 'J/(kmol*K)')
    mu = g_standard + gas_constant * temperature * np.log(
        y * pressure / Q_(1.0, 'atm')
    )

    # Scale the objective down so it is closer to O(1) for the solver.
    return to_si(np.sum(n * mu)) / 1e6
# We need to define functions for the constraints:
def inequality_cons(x):
    """Inequality constraint for SLSQP: every mole count must stay non-negative.

    SLSQP interprets the returned vector as constraints of the form g(x) >= 0,
    so returning ``x`` itself enforces n_i >= 0 for each component.
    """
    return x
def equality_cons(x):
    """Equality constraint: elemental mole balances.

    Returns, per element, the difference between the current elemental moles
    (``elemental_comp`` @ x) and the initial elemental moles; the optimizer
    drives every entry of this vector to zero.
    """
    current_elements = np.dot(elemental_comp, x)
    return current_elements - initial_elements
```
```{margin} Potential issues
Notice that this function evaluating Gibbs free energy of the mixture scales the result down by $10^6$.
I found this was necessary for the solver to converge. However, this means that the function does not return the Gibbs free energy in units of J, but instead MJ.
```
```
# Solve for first pressure
pressure = pressures[0]
gas = ct.Solution('gri30.cti')
x0 = np.array([0.5, 0.5, 0.5])
sol = minimize(
calc_total_gibbs, x0, method='SLSQP',
args=(temperature, pressure, components, gas),
constraints=[
{'type': 'eq','fun': equality_cons},
{'type': 'ineq','fun': inequality_cons}
],
options={'maxiter': 1000}
)
moles = sol.x
mole_fractions = moles / np.sum(moles)
print('Successful convergence: ', sol.success)
# check constraints
print('All moles non-negative: ', all(moles > 0))
print('All elements balanced: ', all(equality_cons(moles) == 0))
print()
print(f'Mole fractions at {pressure: .1f}:')
for idx, comp in enumerate(components):
print(f'{comp}: {mole_fractions[idx]: .3f}')
# Now try next pressure
pressure = pressures[1]
gas = ct.Solution('gri30.cti')
x0 = np.array([0.5, 0.5, 0.5])
sol = minimize(
calc_total_gibbs, x0, method='SLSQP',
args=(temperature, pressure, components, gas),
constraints=[
{'type': 'eq','fun': equality_cons},
{'type': 'ineq','fun': inequality_cons}
],
options={'maxiter': 1000}
)
moles = sol.x
mole_fractions = moles / np.sum(moles)
print('Successful convergence: ', sol.success)
# check constraints
print('All moles non-negative: ', all(moles > 0))
print('All elements balanced: ', all(equality_cons(moles) == 0))
print()
print(f'Mole fractions at {pressure: .1f}:')
for idx, comp in enumerate(components):
print(f'{comp}: {mole_fractions[idx]: .3f}')
```
These results match the values we found previously—whew! 😅
(lagrange-method)=
## Lagrange's method of undetermined multipliers
This method converts the problem into a system of algebraic equations, where the number of equations equal the number of unknowns. It does this by introducing a set of unknown multipliers, $\lambda_j$, with one for each element in the system.
Then, the system of equations we need to solve includes the element balances and equations involving the multipliers:
$$
\sum_{i=1}^C n_i e_{i,j} - \sum_{i=1}^C n_{0,i} e_{i,j} = 0 \quad \text{for } j=1, \ldots, E \;, \\
\mu_i + \sum_{j=1}^E \lambda_j e_{i,j} = 0 \quad \text{for } i=1, \ldots, C \;, \\
$$
where the unknowns are the numbers of moles for each compound $n_i$ where $i = 1, \ldots, C$ and the multipliers for each element $\lambda_j$ where $j = 1, \ldots, E$.
In this system, $e_{i,j}$ is the number of moles of element $j$ in component $i$, $n_{0,i}$ is the initial number of moles of component $i$, $\mu_i$ is the chemical potential of component $i$, $E$ is the number of elements, and $C$ is the number of components (chemical species).
The chemical potentials can be calculated for each component of an ideal gas:
$$
\mu_i = \mu_i^{\circ} + R_{\text{univ}} T \ln \left( \frac{y_i P}{P^{\circ}} \right) \;,
$$
where $R_{\text{univ}}$ is the universal gas constant, $P$ is the mixture pressure,
$P^{\circ}$ is the (standard-state) reference pressure (usually 1 atm or 100 kPa),
and $\mu_i^{\circ}$ is the chemical potential of pure substance $i$ at temperature $T$ and reference pressure $P^{\circ}$, which is the same as the standard-state molar specific Gibbs free energy $\overline{g}_i^{\circ}$:
$$
\mu_i^{\circ} = \overline{g}_i^{\circ} = \overline{h}_i^{\circ} - T \overline{s}_i^{\circ} \;.
$$
We can evaluate $\overline{g}_i^{\circ} (T)$ using a Cantera `Solution` object and specifying the temperature, pressure (using the 1 atm reference), and composition of each component as a pure substance.
```
# Known information
components = ['CO', 'O2', 'CO2']
moles_initial = np.array([1.0, 0.5, 0.0])
# Elemental makeup of components
elemental_comp = np.array([
[1, 0, 1], # carbon
[1, 2, 2], # oxygen
])
temperature = Q_(2500, 'K')
pressures = [1, 10] * Q_('atm')
def lagrange_system(x, temperature, pressure, components,
                    gas, elemental_comp, moles_initial):
    """Residuals of the Lagrange-multiplier equilibrium equations.

    x: unknowns packed as [n_0, n_1, n_2, lambda_0, lambda_1]
       (three component mole numbers followed by one multiplier per element).
    Returns a length-5 array: the element balances followed by the
    chemical-potential/multiplier equations; all entries vanish at equilibrium.
    """
    moles = np.array([x[0], x[1], x[2]])
    multipliers = np.array([x[3], x[4]])
    y = moles / np.sum(moles)

    # Standard-state molar Gibbs energy of each pure species at T and 1 atm.
    g_standard = np.zeros(len(components))
    for i, species in enumerate(components):
        gas.TPX = to_si(temperature), to_si(Q_(1, 'atm')), f'{species}:1.0'
        g_standard[i] = gas.gibbs_mole
    g_standard *= Q_('J/kmol')

    # mu_i = g_i° + R*T*ln(y_i * P / P°), reference pressure 1 atm.
    gas_constant = Q_(ct.gas_constant, 'J/(kmol*K)')
    mu = g_standard + gas_constant * temperature * np.log(
        y * pressure / Q_(1.0, 'atm')
    )

    # Element balances: current elemental moles must equal the initial ones.
    element_equations = (
        np.dot(elemental_comp, moles) - np.dot(elemental_comp, moles_initial)
    )

    # mu_i + sum_j lambda_j * e_{i,j} = 0 for each component i.
    multiplier_equations = to_si(
        mu + np.dot(multipliers, elemental_comp) * Q_('J/kmol')
    )

    return np.concatenate((element_equations, multiplier_equations))
```
After setting up the function to evaluate the system of equations, we can solve for the equilibrium composition at the first pressure using the [`root`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html) function, with the `lm` (Levenberg-Marquardt) method.
We do need to specify some initial guess values for each of the unknowns; while guess values for the numbers of moles of each component may be straightforward (e.g., typically around one), the Lagrange multipliers are more abstract and may take some trial and error.
```
# Solve at first pressure
pressure = pressures[0]
gas = ct.Solution('gri30.cti')
# initial guesses
x0 = [1.0, 1.0, 1.0, 1e6, 1e6]
sol = root(
lagrange_system, x0, method='lm',
args=(temperature, pressure, components, gas, elemental_comp, moles_initial)
)
print('Root-finding algorithm success: ', sol.success)
print(f'Function evaluation (should be small): {sol.fun}')
print('Number of function evaluations: ', sol.nfev)
print()
moles = sol.x[0:3]
mole_fractions = moles / np.sum(moles)
print(f'Mole fractions at {pressure: .1f}:')
for idx, comp in enumerate(components):
print(f'{comp}: {mole_fractions[idx]: .3f}')
pressure = pressures[1]
gas = ct.Solution('gri30.cti')
x0 = [1.0, 1.0, 1.0, 1e6, 1e6]
sol = root(
lagrange_system, x0, method='lm',
args=(temperature, pressure, components, gas, elemental_comp, moles_initial)
)
print('Root-finding algorithm success: ', sol.success)
print(f'Function evaluation (should be near zero): {sol.fun}')
print('Number of function evaluations: ', sol.nfev)
print()
moles = sol.x[0:3]
mole_fractions = moles / np.sum(moles)
print(f'Mole fractions at {pressure: .1f}:')
for idx, comp in enumerate(components):
print(f'{comp}: {mole_fractions[idx]: .3f}')
```
As expected, this approach also produces the same equilibrium composition! 🎉
| github_jupyter |
```
#export
from fastai2.test import *
from fastai2.basics import *
from fastai2.callback.progress import *
from fastai2.text.data import TensorText
from nbdev.showdoc import *
#default_exp callback.wandb
```
# Wandb
> Integration with [wandb](https://www.wandb.com/)
First thing first, you need to install wandb with
```
pip install wandb
```
Create a free account then run
```
wandb login
```
in your terminal. Follow the link to get an API token that you will need to paste, then you're all set!
```
#export
import wandb
#export
class WandbCallback(Callback):
    "Saves model topology, losses & metrics"
    # toward_end=True asks fastai to order this callback after the default
    # ones, so the recorder is up to date when we log — TODO confirm ordering.
    toward_end = True
    # Record if watch has been called previously (even in another instance)
    _wandb_watch_called = False

    def __init__(self, log="gradients", log_preds=True, valid_dl=None, n_preds=36, seed=12345):
        # W&B log step (number of training updates)
        self._wandb_step = 0
        # Fractional epoch counter, advanced per batch in after_batch.
        self._wandb_epoch = 0
        # Check if wandb.init has been called
        if wandb.run is None:
            raise ValueError('You must call wandb.init() before WandbCallback()')
        store_attr(self, 'log,log_preds,valid_dl,n_preds,seed')

    def begin_fit(self):
        "Call watch method to log model topology, gradients & weights"
        # Disable logging during lr_find and gather_preds runs.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
        if not self.run: return
        # wandb.watch must only be called once per process, hence the class flag.
        if not WandbCallback._wandb_watch_called:
            WandbCallback._wandb_watch_called = True
            # Logs model topology and optionally gradients and weights
            wandb.watch(self.learn.model, log=self.log)
        # If SaveModelCallback is active, also save the best model into the W&B run dir.
        if hasattr(self, 'save_model'): self.save_model.add_save = Path(wandb.run.dir)/'bestmodel.pth'
        # Without an explicit valid_dl, build one from a fixed random sample
        # of the validation set so the same items are logged every epoch.
        if self.log_preds and not self.valid_dl:
            #Initializes the batch watched
            wandbRandom = random.Random(self.seed) # For repeatability
            self.n_preds = min(self.n_preds, len(self.dbunch.valid_ds))
            idxs = wandbRandom.sample(range(len(self.dbunch.valid_ds)), self.n_preds)
            items = [self.dbunch.valid_ds.items[i] for i in idxs]
            test_tls = [tl._new(items, split_idx=1) for tl in self.dbunch.valid_ds.tls]
            self.valid_dl = self.dbunch.valid_dl.new(DataSource(tls=test_tls), bs=self.n_preds)

    def after_batch(self):
        "Log hyper-parameters and training loss"
        if self.training:
            self._wandb_step += 1
            # Advance the fractional epoch by one batch's worth.
            self._wandb_epoch += 1/self.n_iter
            # Flatten per-param-group hyper-parameters into e.g. 'lr_0', 'mom_1'.
            hypers = {f'{k}_{i}':v for i,h in enumerate(self.opt.hypers) for k,v in h.items()}
            wandb.log({'epoch': self._wandb_epoch,'train_loss': self.smooth_loss, **hypers}, step=self._wandb_step)

    def after_epoch(self):
        "Log validation loss and custom metrics & log prediction samples"
        # Correct any epoch rounding error and overwrite value
        self._wandb_epoch = round(self._wandb_epoch)
        wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
        # Log sample predictions
        if self.log_preds:
            b = self.valid_dl.one_batch()
            self.learn.one_batch(0, b)
            # Apply the loss function's activation/decoding if it defines them.
            preds = getattr(self.loss_func, 'activation', noop)(self.pred)
            out = getattr(self.loss_func, 'decodes', noop)(preds)
            x,y,its,outs = self.valid_dl.show_results(b, out, show=False, max_n=self.n_preds)
            wandb.log({"Prediction Samples": wandb_process(x, y, its, outs)}, step=self._wandb_step)
        # Log recorder metrics, skipping entries already logged per batch.
        wandb.log({n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}, step=self._wandb_step)

    def after_fit(self):
        self.run = True
        wandb.log({}) #To trigger one last synch
```
Optionally logs weights and/or gradients depending on `log` (can be "gradients", "parameters", "all" or None), and sample predictions if `log_preds=True`; these will come from `valid_dl` or a random sample of the validation set (determined by `seed`). `n_preds` predictions are logged in this case.
If used in combination with `SaveModelCallback`, the best model is saved as well.
## Example of use:
Once you have defined your `Learner`, before you call `fit` or `fit_one_cycle`, you need to initialize wandb:
```
import wandb
wandb.init(project=PROJECT_NAME, entity=USER_NAME)
```
(replace `PROJECT_NAME` and `USER_NAME`). Then you add the callback in your call to fit, potentially with `SaveModelCallback` if you want to save the best model:
```
from fastai2.callback.wandb import *
# To log only during one training phase
learn.fit(..., cbs=WandbCallback())
# To log continuously for all training phases
learn = learner(..., cbs=WandbCallback())
```
```
#export
@typedispatch
def wandb_process(x:TensorImage, y, samples, outs):
    "Process `sample` and `out` depending on the type of `x/y`"
    res = []
    for s,o in zip(samples, outs):
        # s[0] is the input image; permute to channels-last for display
        # (assumes a (C,H,W) tensor — TODO confirm against the dataloader).
        img = s[0].permute(1,2,0)
        res.append(wandb.Image(img, caption='Input data', grouping=3))
        # Render both the prediction and the ground truth over the input image.
        for t, capt in ((o[0], "Prediction"), (s[1], "Ground Truth")):
            # Resize plot to image resolution (from https://stackoverflow.com/a/13714915)
            my_dpi = 100
            fig = plt.figure(frameon=False, dpi=my_dpi)
            h, w = img.shape[:2]
            fig.set_size_inches(w / my_dpi, h / my_dpi)
            # Axes spanning the full figure, with no frame or ticks.
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.set_axis_off()
            fig.add_axes(ax)
            # Superimpose label or prediction to input image
            ax = img.show(ctx=ax)
            ax = t.show(ctx=ax)
            res.append(wandb.Image(fig, caption=capt))
            # Close each figure so repeated logging does not leak matplotlib state.
            plt.close(fig)
    return res
#export
@typedispatch
def wandb_process(x:TensorImage, y:(TensorCategory,TensorMultiCategory), samples, outs):
    "Log classification samples as W&B images captioned with target and prediction."
    images = []
    for sample, out in zip(samples, outs):
        caption = f'Ground Truth: {sample[1]}\nPrediction: {out[0]}'
        images.append(wandb.Image(sample[0].permute(1,2,0), caption=caption))
    return images
#export
@typedispatch
def wandb_process(x:TensorText, y:(TensorCategory,TensorMultiCategory), samples, outs):
    "Log text classification samples as a W&B table of (text, target, prediction)."
    rows = [[sample[0], sample[1], out[0]] for sample, out in zip(samples, outs)]
    return wandb.Table(data=rows, columns=["Text", "Target", "Prediction"])
#export
_all_ = ['wandb_process']
```
## Export -
```
#hide
from nbdev.export import *
notebook2script()
```
| github_jupyter |
```
from Naive import NaiveBayes
```
# Naive Bayes -- Class to Calculate Single Phrase Posterior Probability
This class is designed for calculating single phrase probability to classify a given property. We could either write our own definition of likelihoods of each feature, or simply load in my pre-defined `json` file.
In this notebook, let's simply try my pre-defined `json` file for practice.
Resources:
- [Naive Bayes Probabilistic Model (wiki)](https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Probabilistic_model)
- [Naive Bayes for Text Classification (Andrew Ng's ML course).mp4](http://openclassroom.stanford.edu/MainFolder/courses/MachineLearning/videos/06.2-NaiveBayes-TextClassification.mp4)
- [Naive Bayes for Spell Checker Python Code (Peter Norvig)](https://norvig.com/spell-correct.html)
## Loading Class
I load the likelihood table from `definition_time.json` file, and set the prior probability for being a time phrase to be 0.3.
```
from Naive import NaiveBayes
naive = NaiveBayes(filename='definition_time.json', prior=0.3)
# definition table attribute
naive.definition_dict.keys()
# you can check the columns inside it
naive.definition_dict['columns']
```
## Iterables and Likelihoods
Iterable means the bag-of-characters (or bag-of-words) that you think they are belonging to certain type of feature.
Say feature for 數字,
```
naive.definition_dict['data'][-4]
```
this means that we think the bag-of-chars `是元正𨳝一二三四五六七八九十廿卅` belongs to numbers, and any time phrase would have a 0.6 likelihood of containing these numbers.
## Calculate Posterior Probability
Once we have prior and likelihoods from `json`, we can calculate the posterior probability with a given phrase.
```
# simply use calc_posterior method
naive.calc_posterior('興寧三年')
```
## Regularize the Irrelevant Characters
The `naive.calc_posterior` method only considers the characters that match your given likelihoods and iterables. If you want to penalize the irrelevant chars, simply use the `regularize` arg to set the penalty probability.
```
# If we don't set the regularization,
# these 2 phrase would have the same posterior.
naive.calc_posterior('興寧三年'), naive.calc_posterior('興寧三年你好嗎')
# we can set the regularize=0.4 to drag down the posterior of the second phrase
naive.calc_posterior('興寧三年', regularize=0.4), naive.calc_posterior('興寧三年你好嗎', regularize=0.4)
```
## Add your Own Definition
We can modify the definition dict to add new likelihoods and iterables.
```
# before adding 平成
naive.calc_posterior("平成三十年", regularize=0.3)
# we can add new definition of likelihood and a bag-of-words using dict
new_definition = {
'name': '現代紀年',
'iterable': ['中華民國', '民國', '平成', '昭和', '西元', '西曆'],
'likelihood': 0.8
}
naive.definition_dict['data'].append(
new_definition
)
# saving to json for new-time usage
naive.to_json('definition_time_modern.json')
# loading again
naive = NaiveBayes(filename="definition_time_modern.json", prior=0.3)
naive.calc_posterior("平成三十年", regularize=0.3)
```
## Interesting Philosophical Article about Thomas Bayes
- [Thomas Bayes and the crisis in science](https://www.the-tls.co.uk/articles/public/thomas-bayes-science-crisis/) by David Papineau: this article discuss the life of Thomas Bayes and why _inverse probability_ is interesting. Moreover, discuss about the over-used hypothesis testing and the ignorance of _prior probability_.
| github_jupyter |
```
import casadi as cs
from urdf2casadi import urdfparser as u2c
import urdf2casadi.geometry.dual_quaternion as dual_quaternion_geometry
import os # For current directory
dual_quaternion_to_transformation_matrix = dual_quaternion_geometry.to_numpy_transformation_matrix
# urdf2casadi uses cs.SX, which can be hard to read as these are sparse matrices.
# This short function just makes it so that the result will be a numpy matrix
# Use for
def cs2np(asd):
    """Evaluate a constant CasADi SX expression and return it as a dense NumPy array."""
    fn = cs.Function("temp", [], [asd])
    return fn()["o0"].toarray()
# NOTE: casadi imports numpy as np, so cs.np is numpy
```
# Importing UR5 urdf
```
urdf_path = "../urdf/ur5_mod.urdf"
root_link = "base_link"
end_link = "tool0"
robot_parser = u2c.URDFparser()
robot_parser.from_file(urdf_path)
fk_dict = robot_parser.get_forward_kinematics(root_link, end_link)
print(fk_dict.keys())
```
`fk_dict` is a python dictionary with some of the things we can extract from the URDF. In this example we will show the forward kinematics for the UR5 as a transformation matrix, as a dual quaternion, and a way of calculating the jacobians for the forward kinematics.
## Joint information
```
# CasADi SX symbol giving the joint symbols:
q = fk_dict["q"]
# Upper limits of the joint values
q_upper = fk_dict["upper"]
# Lower limits of the joint values
q_lower = fk_dict["lower"]
# Joint names
joint_names = fk_dict["joint_names"]
print("Number of joints:", q.size()[0])
print("Upper limits:", q_upper)
print("Lower limits:", q_lower)
print("Joint names:", joint_names)
```
## Forward kinematics
```
# CasADi SX function for transformation matrix of the forward kinematics:
T_fk = fk_dict["T_fk"]
# CasADi SX function for dual_quaternion of the forward kinematics:
Q_fk = fk_dict["dual_quaternion_fk"]
```
So what's the position when all joint values are zero?
```
T0 = T_fk([0., 0., 0., 0., 0., 0.])
p0 = T0[:3, 3]
R0 = T0[:3, :3]
print("Transformation matrix:\n",cs2np(T0))
print("Position:\n", "x:",p0[0]," y:", p0[1], " z:", p0[2])
print("Distance from origin:\n", cs.np.linalg.norm(p0), "m")
```
And how about as a dual quaternion?
```
Q0 = Q_fk([0., 0., 0., 0., 0., 0.])
TofQ0 = dual_quaternion_to_transformation_matrix(Q0)
print("Dual quaternion:\n", cs2np(Q0))
print("Dual quaternion as transformation matrix:\n", cs2np(TofQ0))
if cs.np.linalg.norm(cs2np(TofQ0) - cs2np(T0)) < 1e-12:
print("||TofQ0 - T0||< 1e-12, so they are equal")
```
## Jacobians
As we're dealing with symbols, we formulate the symbolic expression for the jacobian, and then we create the functions for them.
```
# Create symbols
fk_position_jacobian_sym = cs.jacobian(T_fk(q)[:3,3], q)
fk_rotation_jacobian_sym = cs.jacobian(T_fk(q)[:3,:3], q)
fk_dual_quaternion_jacobian_sym = cs.jacobian(Q_fk(q), q)
# Create functions
fk_position_jacobian = cs.Function("jac_fk_pos", [q], [fk_position_jacobian_sym], ["q"], ["jac_fk_pos"])
fk_rotation_jacobian = cs.Function("jac_fk_rot", [q], [fk_rotation_jacobian_sym], ["q"], ["jac_fk_rot"])
fk_dual_quaternion_jacobian = cs.Function("jac_fk_Q", [q], [fk_dual_quaternion_jacobian_sym], ["q"], ["jac_fk_Q"])
```
Now let's test them out!
```
joint_vals = [0., 0., 0., 0., 0., 0.,]
pos_jac = fk_position_jacobian(joint_vals)
print("Positional jacobian at ", joint_vals, "is:\n", cs2np(pos_jac))
```
What does this tell us? Let's look at each of the x, y, and z directions separately.
```
print("Comparative measure of how readily each direction is controlled:")
print("Norm of jac in x direction:", cs.np.linalg.norm(cs2np(pos_jac[0,:])))
print("Norm of jac in y direction:", cs.np.linalg.norm(cs2np(pos_jac[1,:])))
print("Norm of jac in z direction:", cs.np.linalg.norm(cs2np(pos_jac[2,:])))
print("\nComparative measure of how readily each joint affects positions:")
for i in range(len(joint_names)):
print(joint_names[i]+":",cs.np.linalg.norm(cs2np(pos_jac[:,i])))
```
So the Z direction is most affected by any of the joint values (but the last), motion in the y direction can only be done by the shoulder pan joint (`q[0]`) because all the motors are standing perpendicular to the y direction. Oh, and the last joint seems to do nothing, well, that's because it's the infinite rotational joint at the end of the UR robot that is just used for rotating the end-effector frame. So basically it doesn't change the position.
What are these comparative measures we're talking about? Essentially it amounts to asking, if we take the same tiny step on each of the joints, which one is the most affected, or if we wish to move, which direction is most affected by a tiny step or set of tiny steps?
The following are not as tidy and easily read, but given for completion
```
joint_vals = [0., 0., 0., 0., 0., 0.,]
rot_jac = fk_rotation_jacobian(joint_vals)
print(cs2np(rot_jac))
joint_vals = [0., 0., 0., 0., 0., 0.,]
Q_jac = fk_dual_quaternion_jacobian(joint_vals)
print(cs2np(Q_jac))
print(cs2np(Q_jac)[:4,0])
print("Comparative measure of joint effect on rotation")
for i in range(len(joint_names)):
print(joint_names[i]+":",cs.np.linalg.norm(cs2np(Q_jac)[:4,i])) # First four elements = rotation quaternion
```
Basically taking a tiny step on any of the joints is equally capable at causing rotation regardless of which of the joints we choose to move.
| github_jupyter |
# Time series forecasting using recurrent neural networks
### Import necessary libraries
```
%matplotlib notebook
import numpy
import pandas
import math
import time
import sys
import datetime
import matplotlib.pyplot as ma
import keras.models as km
import keras.layers as kl
import sklearn.preprocessing as sp
```
### Initialize random seed for constant neural network initialization
```
numpy.random.seed(42)
```
### Load necessary CSV file
```
# Load the 5-minute response-time series; keep only the mean-value column.
try:
    ts = pandas.read_csv('../../datasets/srv-1-art-5m.csv')
except Exception as err:
    # Bug fix: the original handler called
    # print("...", sep=',', header=1) -- `header` is not a print()
    # parameter and raised TypeError inside the except block, masking
    # the real read error.  Report the actual exception instead.
    print("Unable to read .csv file:", err)
ts.index = pandas.to_datetime(ts['ts'])
# Delete unnecessary columns; only the mean response time is forecast.
for column in ('id', 'ts', 'min', 'max', 'sum', 'cnt', 'p50', 'p95', 'p99'):
    del ts[column]
# print table info
ts.info()
```
### Get values from specified range
```
ts = ts['2018-06-16':'2018-07-15']
```
### Remove possible zero and NA values (by interpolation)
We are using the MAPE formula for computing the final score, so no zero values may occur in the time series. Replace them with NA values. NA values are later explicitly removed by linear interpolation.
```
def print_values_stats():
    """Report zero, missing and present value counts for the global `ts`."""
    zero_count = sum(1 if x == 0 else 0 for x in ts.values)
    print("Zero Values:\n", zero_count, "\n\nMissing Values:\n", ts.isnull().sum(), "\n\nFilled in Values:\n", ts.notnull().sum(), "\n")

# Re-index onto a complete 5-minute grid so time gaps become explicit NaNs.
full_index = pandas.date_range(ts.index.min(), ts.index.max(), freq="5min")
ts = ts.reindex(full_index, fill_value=None)
print("Before interpolation:\n")
print_values_stats()
# Zeros would break MAPE later, so treat them as missing, then fill every
# NaN by linear interpolation in both directions.
ts = ts.replace(0, numpy.nan)
ts = ts.interpolate(limit_direction="both")
print("After interpolation:\n")
print_values_stats()
```
### Plot values
```
# Idea: Plot figure now and do not wait on ma.show() at the end of the notebook
def plot_without_waiting(ts_to_plot):
    # Interactive mode so show() does not block cell execution.
    ma.ion()
    ma.show()
    # Function attribute keeps a running figure counter across calls.
    fig = ma.figure(plot_without_waiting.figure_counter)
    plot_without_waiting.figure_counter += 1
    ma.plot(ts_to_plot, color="blue")
    ma.draw()
    try:
        ma.pause(0.001)  # throws NotImplementedError, ignore it
    except:
        pass

plot_without_waiting.figure_counter = 1
plot_without_waiting(ts)
```
### Normalize time series for neural network
LSTM cells are very sensitive to large scaled values. It's generally better to normalize them into <0,1> interval.
```
dates = ts.index # save dates for further use
scaler = sp.MinMaxScaler(feature_range=(0,1))
ts = scaler.fit_transform(ts)
```
### Split time series into train and test series
We have decided to split train and test time series by two weeks.
```
train_data_length = 12*24*7
ts_train = ts[:train_data_length]
ts_test = ts[train_data_length+1:]
```
### Create train and test dataset for neural networks
The neural network takes input from TS at time t and returns predicted output at time *t+1*. Generally, we could create neural network that would return predicted output at time *t+n*, just by adjusting *loop_samples* parameter.
```
def dataset_create(ts, loop_samples):
    """Turn a scaled series into (window, next-value) supervised pairs.

    X[k] holds `loop_samples` consecutive values starting at index k and
    y[k] is the value immediately following that window.
    """
    pair_count = len(ts) - loop_samples - 1
    windows = [ts[start:start + loop_samples, 0] for start in range(pair_count)]
    targets = [ts[start + loop_samples, 0] for start in range(pair_count)]
    return numpy.array(windows), numpy.array(targets)
# One-step-ahead datasets: input window length 1.
train_dataset_x, train_dataset_y = dataset_create(ts_train, 1)
test_dataset_x, test_dataset_y = dataset_create(ts_test, 1)
```
### Reshape datasets for NN into [batch size; timesteps; input dimensionality] format
The Keras library has specific requirements for the format of its inputs. See https://keras.io/layers/recurrent/ for more details.
```
def dataset_reshape_for_nn(dataset):
    """Insert a singleton timestep axis: (batch, features) -> (batch, 1, features)."""
    batch_size, feature_count = dataset.shape
    return dataset.reshape((batch_size, 1, feature_count))
# Add the singleton timestep axis that Keras recurrent layers expect.
train_dataset_x = dataset_reshape_for_nn(train_dataset_x)
test_dataset_x = dataset_reshape_for_nn(test_dataset_x)
```
### Create recurrent neural network
This recurrent neural network (RNN) consists of three layers (*input, hidden* and *output*). The input layer is implicitly specified by the hidden layer (*input_shape* parameter). Logically, we need to have exactly one input and one output node for one-step prediction. Number of hidden neurons is specified by *number_lstm_cells* variable.
In this RNN we use LSTM cells with the sigmoid (http://mathworld.wolfram.com/SigmoidFunction.html) activation function. The network is configured to use *mean squared error* (MSE) as the optimization objective that is minimized during backpropagation, and the *stochastic gradient descent* (SGD) optimizer with default parameters (https://keras.io/optimizers/).
```
number_lstm_cells = 2  # hidden-layer width
# Layer based network
network = km.Sequential()
# Hidden layer is made from LSTM nodes; input_shape=(1, 1) matches the
# [timesteps=1, features=1] samples built by dataset_reshape_for_nn above.
network.add(kl.LSTM(number_lstm_cells, activation="sigmoid", input_shape=(1,1)))
# Output layer with one output (one step prediction)
network.add(kl.Dense(1))
# MSE is both the training loss and a reported metric.
network.compile(loss="mse", optimizer="sgd", metrics=['mean_squared_error'])
```
### Train neural network
Train neural network on train data and plot MSE metrics for each iteration. Results and time of training process depends on *train_iterations* value.
```
train_iterations = 100  # epochs; training time scales linearly with this
start_time = time.time()
print("Network fit started...\n")
# batch_size=1: pure stochastic gradient descent, one sample per update.
network_history = network.fit(train_dataset_x, train_dataset_y, epochs=train_iterations, batch_size=1, verbose=0)
print("Network fit finished. Time elapsed: ", time.time() - start_time, "\n")
# Plot per-epoch MSE without blocking the notebook.
plot_without_waiting(network_history.history['mean_squared_error'])
```
### Predict new values
The array *test_dataset_x* is used as an input for the network.
```
predicted_values_unscaled = network.predict(test_dataset_x)
# Scale the predicted values back using MinMaxScaler
predicted_values_scaled = scaler.inverse_transform(predicted_values_unscaled)
# Scale test values back so we can compare the result
test_values_scaled = scaler.inverse_transform(ts_test)
```
### Count mean absolute percentage error
We use MAPE (https://www.forecastpro.com/Trends/forecasting101August2011.html) instead of MSE because the result of MAPE does not depend on size of values.
```
# MAPE: mean of |actual - predicted| / actual over the test horizon, in percent.
values_sum = sum(
    abs((actual[0] - predicted[0]) / actual[0])
    for actual, predicted in zip(test_values_scaled, predicted_values_scaled)
)
values_sum *= 100 / len(test_values_scaled)
print("MAPE: ", values_sum, "%\n")
```
### Plot predicted values
```
fig = ma.figure(plot_without_waiting.figure_counter)
ma.plot(test_values_scaled, color="blue", label="Test")
ma.plot(predicted_values_scaled, color="red", label="LSTM neural network")
ts_len = len(ts)
date_offset_indices = ts_len // 6
ma.xticks(range(0, ts_len-train_data_length, date_offset_indices), [x.date().strftime('%Y-%m-%d') for x in dates[train_data_length::date_offset_indices]])
ma.xlabel("Timestamps")
ma.ylabel("Response times")
ma.legend(loc='best')
fig.show()
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import scipy as sp
import numpy as np
import time
import networkx as nx
try:
from localgraphclustering import *
except:
# when the package is not installed, import the local version instead.
# the notebook must be placed in the original "notebooks/" folder
sys.path.append("../")
from localgraphclustering import *
import warnings
warnings.filterwarnings('ignore')
from os import listdir
from os.path import isfile, join
mypath_sz = '/Users/kimonfountoulakis/Downloads/sz/'
mypath_hc = '/Users/kimonfountoulakis/Downloads/hc/'
onlyfiles_sz = [f for f in listdir(mypath_sz) if isfile(join(mypath_sz, f))]
onlyfiles_hc = [f for f in listdir(mypath_hc) if isfile(join(mypath_hc, f))]
```
## Generate plots for Sz
```
# Network community profile (NCP) plots for each Sz subject's connectivity
# matrix, split into its positive and (sign-flipped) negative parts.
for data in onlyfiles_sz:
    full_path = mypath_sz + data
    # Bug fix: the original passed an open() handle that was never closed;
    # np.loadtxt accepts a filename and manages the file itself.
    A_mp = np.loadtxt(full_path, delimiter=",", skiprows=0)
    A_mp_copy1 = A_mp.copy()
    A_mp_copy2 = A_mp.copy()

    # --- positive part of the matrix ---
    A_mp_copy1[A_mp_copy1 < 0] = 0
    A_mp = sp.sparse.csr_matrix(A_mp_copy1)
    g = GraphLocal()
    g.from_sparse_adjacency(A_mp)
    g.is_disconnected()
    # Use networkx only for visualization
    g_nx = nx.from_scipy_sparse_matrix(A_mp)
    pos_nx = nx.spring_layout(g_nx)
    pos = list(pos_nx.values())
    ncp_instance = NCPData(g, store_output_clusters=False, do_largest_component=True)
    ncp_instance.approxPageRank(ratio=1, nthreads=6, method="l1reg-rand", normalize=True, normalized_objective=True)
    print(" ")
    print('Visualization of positive part')
    drawing = g.draw(pos, nodecolor='black', figsize=(10, 10), nodesize=150, edgealpha=0.05)
    drawing.show()
    print(" ")
    print("Sz, subject: ", data, ' positive part')
    ncp_plots = NCPPlots(ncp_instance, method_name="l1reg-rand")
    # plot conductance vs volume
    fig, ax, _ = ncp_plots.cond_by_vol()
    plt.show()

    # --- negative part, flipped to positive weights ---
    A_mp_copy2[A_mp_copy2 > 0] = 0
    A_mp_copy2 = -A_mp_copy2
    A_mp = sp.sparse.csr_matrix(A_mp_copy2)
    # Bug fix: the graph was constructed twice in a row from the same
    # matrix; building it once is sufficient.
    g = GraphLocal()
    g.from_sparse_adjacency(A_mp)
    g.is_disconnected()
    # Use networkx only for visualization
    g_nx = nx.from_scipy_sparse_matrix(A_mp)
    pos_nx = nx.spring_layout(g_nx)
    pos = list(pos_nx.values())
    ncp_instance = NCPData(g, store_output_clusters=False, do_largest_component=False)
    ncp_instance.approxPageRank(ratio=1, nthreads=6, method="l1reg-rand", normalize=True, normalized_objective=True)
    print(" ")
    # Bug fix: this section visualizes the negative part, not the positive.
    print('Visualization of negative part')
    drawing = g.draw(pos, nodecolor='black', figsize=(10, 10), nodesize=150, edgealpha=0.05)
    drawing.show()
    print(" ")
    print("Sz, subject: ", data, ' negative part')
    ncp_plots = NCPPlots(ncp_instance, method_name="l1reg-rand")
    # plot conductance vs volume
    fig, ax, _ = ncp_plots.cond_by_vol()
    plt.show()
```
## Generate plots for Hc
```
# NCP plots for each Hc (healthy control) subject, mirroring the Sz loop.
# Bug fix: the original first line read `data in onlyfiles_hc:` -- the
# `for` keyword was missing, which is a SyntaxError.
for data in onlyfiles_hc:
    full_path = mypath_hc + data
    # Bug fix: pass the filename so np.loadtxt closes the file itself
    # (the original leaked an open() handle).
    A_mp = np.loadtxt(full_path, delimiter=",", skiprows=0)
    A_mp_copy1 = A_mp.copy()
    A_mp_copy2 = A_mp.copy()

    # --- positive part of the matrix ---
    A_mp_copy1[A_mp_copy1 < 0] = 0
    A_mp = sp.sparse.csr_matrix(A_mp_copy1)
    g = GraphLocal()
    g.from_sparse_adjacency(A_mp)
    g.is_disconnected()
    # Use networkx only for visualization
    g_nx = nx.from_scipy_sparse_matrix(A_mp)
    pos_nx = nx.spring_layout(g_nx)
    pos = list(pos_nx.values())
    ncp_instance = NCPData(g, store_output_clusters=False, do_largest_component=True)
    ncp_instance.approxPageRank(ratio=1, nthreads=6, method="l1reg-rand", normalize=True, normalized_objective=True)
    print(" ")
    print('Visualization of positive part')
    drawing = g.draw(pos, nodecolor='black', figsize=(10, 10), nodesize=150, edgealpha=0.05)
    drawing.show()
    print(" ")
    # Bug fix: this loop processes Hc subjects, not Sz.
    print("Hc, subject: ", data, ' positive part')
    ncp_plots = NCPPlots(ncp_instance, method_name="l1reg-rand")
    # plot conductance vs volume
    fig, ax, _ = ncp_plots.cond_by_vol()
    plt.show()

    # --- negative part, flipped to positive weights ---
    A_mp_copy2[A_mp_copy2 > 0] = 0
    A_mp_copy2 = -A_mp_copy2
    A_mp = sp.sparse.csr_matrix(A_mp_copy2)
    # Bug fix: the graph was constructed twice from the same matrix.
    g = GraphLocal()
    g.from_sparse_adjacency(A_mp)
    g.is_disconnected()
    # Use networkx only for visualization
    g_nx = nx.from_scipy_sparse_matrix(A_mp)
    pos_nx = nx.spring_layout(g_nx)
    pos = list(pos_nx.values())
    ncp_instance = NCPData(g, store_output_clusters=False, do_largest_component=False)
    ncp_instance.approxPageRank(ratio=1, nthreads=6, method="l1reg-rand", normalize=True, normalized_objective=True)
    print(" ")
    # Bug fix: this section visualizes the negative part, not the positive.
    print('Visualization of negative part')
    drawing = g.draw(pos, nodecolor='black', figsize=(10, 10), nodesize=150, edgealpha=0.05)
    drawing.show()
    print(" ")
    print("Hc, subject: ", data, ' negative part')
    ncp_plots = NCPPlots(ncp_instance, method_name="l1reg-rand")
    # plot conductance vs volume
    fig, ax, _ = ncp_plots.cond_by_vol()
    plt.show()
```
| github_jupyter |
# Rational Expectations Agricultural Market Model
**Randall Romero Aguilar, PhD**
This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by Mario Miranda and Paul Fackler.
Original (Matlab) CompEcon file: **demintro01.m**
Running this file requires the Python version of CompEcon. This can be installed with pip by running
!pip install compecon --upgrade
<i>Last updated: 2021-Oct-01</i>
<hr>
```
import numpy as np
import matplotlib.pyplot as plt
from compecon import demo, qnwlogn, discmoments
%matplotlib inline
plt.style.use('seaborn')
```
Generate yield distribution
```
sigma2 = 0.2 ** 2
y, w = qnwlogn(25, -0.5 * sigma2, sigma2)
```
Compute rational expectations equilibrium using function iteration, iterating on acreage planted
```
# Acreage response map: planting responds to the expectation (over yield
# states y with weights w) of max(market price, target/support price pp).
A = lambda aa, pp: 0.5 + 0.5 * np.dot(w, np.maximum(1.5 - 0.5 * aa * y, pp))

ptarg = 1  # target (support) price
a = 1      # initial acreage guess
# Function iteration on acreage planted until the update is below 1e-8.
for it in range(50):
    aold = a
    a = A(a, ptarg)
    print('{:3d} {:8.4f} {:8.1e}'.format(it, a, np.linalg.norm(a - aold)))
    if np.linalg.norm(a - aold) < 1.e-8:
        break
```
Intermediate outputs
```
q = a * y # quantity produced in each state
p = 1.5 - 0.5 * a * y # market price in each state
f = np.maximum(p, ptarg) # farm price in each state
r = f * q # farm revenue in each state
g = (f - p) * q #government expenditures
xavg, xstd = discmoments(w, np.vstack((p, f, r, g)))
varnames = ['Market Price', 'Farm Price', 'Farm Revenue', 'Government Expenditures']
```
Print results
```
print('\n{:24s} {:8s} {:8s}'.format('Variable', 'Expect', 'Std Dev'))
for varname, av, sd in zip(varnames, xavg, xstd):
print(f'{varname:24s} {av:8.4f} {sd:8.4f}')
```
Generate fixed-point mapping
```
aeq = a
a = np.linspace(0, 2, 100)
g = np.array([A(k, ptarg) for k in a])
```
### Graph rational expectations equilibrium
```
fig1 = plt.figure(figsize=[6, 6])
ax = fig1.add_subplot(111, title='Rational expectations equilibrium', aspect=1,
xlabel='Acreage Planted', xticks=[0, aeq, 2], xticklabels=['0', '$a^{*}$', '2'],
ylabel='Rational Acreage Planted', yticks=[0, aeq, 2],yticklabels=['0', '$a^{*}$', '2'])
ax.plot(a, g, 'b', linewidth=4)
ax.plot(a, a, ':', color='grey', linewidth=2)
ax.plot([0, aeq, aeq], [aeq, aeq, 0], 'r--', linewidth=3)
ax.plot([aeq], [aeq], 'ro', markersize=12)
ax.text(0.05, 0, '45${}^o$', color='grey')
ax.text(1.85, aeq - 0.15,'$g(a)$', color='blue')
fig1.show()
```
### Compute rational expectations equilibrium as a function of the target price
```
nplot = 50                        # number of target prices to evaluate
ptarg = np.linspace(0, 2, nplot)
a = 1                             # acreage guess, warm-started across prices
# Expectations and standard deviations of price/revenue/expenditure series.
Ep, Ef, Er, Eg, Sp, Sf, Sr, Sg = (np.empty(nplot) for k in range(8))
for ip in range(nplot):
    # Fixed-point iteration on acreage for this target price.
    for it in range(50):
        aold = a
        a = A(a, ptarg[ip])
        # Bug fix: the original tested np.linalg.norm((a - aold) < 1.e-10),
        # i.e. the norm of a *boolean*, which is true for any decrease in a
        # and stopped the iteration long before convergence.  Compare the
        # norm of the update itself (matches the loop earlier in the demo).
        if np.linalg.norm(a - aold) < 1.e-10:
            break
    q = a * y                       # quantity produced in each state
    p = 1.5 - 0.5 * a * y           # market price in each state
    f = np.maximum(p, ptarg[ip])    # farm price (support floor)
    r = f * q                       # farm revenue
    g = (f - p) * q                 # government expenditures
    xavg, xstd = discmoments(w, np.vstack((p, f, r, g)))
    Ep[ip], Ef[ip], Er[ip], Eg[ip] = tuple(xavg)
    Sp[ip], Sf[ip], Sr[ip], Sg[ip] = tuple(xstd)

# Horizontal reference line at the first value of a series.
zeroline = lambda y: plt.axhline(y[0], linestyle=':', color='gray')
```
### Graph expected prices vs target price
```
fig2 = plt.figure(figsize=[8, 6])
ax1 = fig2.add_subplot(121, title='Expected price',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[0.5, 1, 1.5, 2], ylim=[0.5, 2.0])
zeroline(Ep)
ax1.plot(ptarg, Ep, linewidth=4, label='Market Price')
ax1.plot(ptarg, Ef, linewidth=4, label='Farm Price')
ax1.legend(loc='upper left')
# Graph expected prices vs target price
ax2 = fig2.add_subplot(122, title='Price variabilities',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Standard deviation', yticks=[0, 0.1, 0.2]) #plt.ylim(0.5, 2.0)
zeroline(Sf)
ax2.plot(ptarg, Sp, linewidth=4, label='Market Price')
ax2.plot(ptarg, Sf, linewidth=4, label='Farm Price')
ax2.legend(loc='upper left')
fig2.show()
# Graph expected farm revenue vs target price
fig3 = plt.figure(figsize=[12, 6])
ax1 = fig3.add_subplot(131, title='Expected revenue',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[1, 2, 3], ylim=[0.8, 3.0])
zeroline(Er)
ax1.plot(ptarg, Er, linewidth=4)
# Graph standard deviation of farm revenue vs target price
ax2 = fig3.add_subplot(132, title='Farm Revenue Variability',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Standard deviation', yticks=[0, 0.2, 0.4])
zeroline(Sr)
ax2.plot(ptarg, Sr, linewidth=4)
# Graph expected government expenditures vs target price
ax3 = fig3.add_subplot(133, title='Expected Government Expenditures',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[0, 1, 2], ylim=[-0.05, 2.0])
zeroline(Eg)
ax3.plot(ptarg, Eg, linewidth=4)
plt.show()
#fig1.savefig('demintro02--01.png')
#fig2.savefig('demintro02--02.png')
#fig3.savefig('demintro02--03.png')
```
| github_jupyter |
```
import torch
from torch import nn, optim
import numpy as np
import mlflow
import pathlib
from pytorch_lightning.core.lightning import LightningModule
import pytorch_lightning
pytorch_lightning.__version__
mlflow.pytorch.autolog()
class Generator(LightningModule):
    """Autoregressive token-sequence generator built on a stacked RNN.

    `voc` is the project's vocabulary object; this class relies only on its
    `size`, `max_len`, `tk2ix` (token-to-index map including "GO"/"EOS")
    and `check_smiles` members.
    """

    def __init__(self, voc, embed_size=128, hidden_size=512, is_lstm=True, lr: float = 1e-3, dev_mode: bool = False):
        super(Generator, self).__init__()
        self.voc = voc
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.output_size = voc.size
        self.embed = nn.Embedding(voc.size, embed_size)
        self.is_lstm = is_lstm
        # Three stacked recurrent layers; LSTM by default, GRU otherwise.
        rnn_layer = nn.LSTM if is_lstm else nn.GRU
        self.rnn = rnn_layer(embed_size, hidden_size, num_layers=3, batch_first=True)
        self.linear = nn.Linear(hidden_size, voc.size)
        self.optim = optim.Adam(self.parameters(), lr=lr)

    def forward(self, input, h):
        """One decoding step: (token batch, hidden state) -> (logits, new state)."""
        output = self.embed(input.unsqueeze(-1))
        output, h_out = self.rnn(output, h)
        output = self.linear(output).squeeze(1)
        return output, h_out

    def init_h(self, batch_size, labels=None):
        """Random initial hidden (and, for LSTM, cell) state for a batch.

        When `labels` is given it is written into slot 0 of the first
        layer's state as a conditioning signal.
        """
        # Bug fix: state size must follow self.hidden_size, not a literal
        # 512, or any non-default hidden_size crashes inside the RNN.
        h = torch.rand(3, batch_size, self.hidden_size)
        if labels is not None:
            # Bug fix: the original wrote h[0, batch_size, 0], one past the
            # end of dim 1 (IndexError).  Write one label per sample.
            # NOTE(review): confirm this matches the intended conditioning.
            h[0, :, 0] = labels
        if self.is_lstm:
            c = torch.rand(3, batch_size, self.hidden_size)
        return (h, c) if self.is_lstm else h

    def likelihood(self, target):
        """Per-step log-probabilities of `target` under the model.

        Returns a (batch, seq_len) tensor of log-softmax scores gathered at
        the target tokens (teacher forcing).
        """
        batch_size, seq_len = target.size()
        x = torch.LongTensor([self.voc.tk2ix["GO"]] * batch_size)
        h = self.init_h(batch_size)
        scores = torch.zeros(batch_size, seq_len)
        for step in range(seq_len):
            logits, h = self(x, h)
            logits = logits.log_softmax(dim=-1)
            score = logits.gather(1, target[:, step : step + 1]).squeeze()
            scores[:, step] = score
            x = target[:, step]  # feed the ground-truth token back in
        return scores

    def PGLoss(self, loader):
        """One policy-gradient pass: maximize reward-weighted log-likelihood."""
        for seq, reward in loader:
            self.zero_grad()
            score = self.likelihood(seq)
            loss = -(score * reward).mean()
            loss.backward()
            self.optim.step()

    def sample(self, batch_size):
        """Sample `batch_size` sequences, padding with EOS once a row finishes."""
        x = torch.LongTensor([self.voc.tk2ix["GO"]] * batch_size)
        h = self.init_h(batch_size)
        sequences = torch.zeros(batch_size, self.voc.max_len).long()
        isEnd = torch.zeros(batch_size).bool()
        for step in range(self.voc.max_len):
            logit, h = self(x, h)
            proba = logit.softmax(dim=-1)
            x = torch.multinomial(proba, 1).view(-1)
            # Rows that already emitted EOS keep emitting EOS.
            x[isEnd] = self.voc.tk2ix["EOS"]
            sequences[:, step] = x
            end_token = x == self.voc.tk2ix["EOS"]
            isEnd = torch.ge(isEnd + end_token, 1)
            if (isEnd == 1).all():
                break
        return sequences

    def evolve(self, batch_size, epsilon=0.01, crover=None, mutate=None):
        """Sample sequences while mixing in a crossover net and a mutation net.

        `crover` blends its output distribution with this model's at a random
        per-sample ratio; `mutate` replaces a fraction `epsilon` of the rows'
        distributions outright.
        """
        # Start tokens
        x = torch.LongTensor([self.voc.tk2ix["GO"]] * batch_size)
        # Hidden states initialization for exploitation network
        h = self.init_h(batch_size)
        # Hidden states initialization for exploration networks
        h1 = self.init_h(batch_size)
        h2 = self.init_h(batch_size)
        # Initialization of output matrix
        sequences = torch.zeros(batch_size, self.voc.max_len).long()
        # labels to judge and record which sample is ended
        is_end = torch.zeros(batch_size).bool()
        for step in range(self.voc.max_len):
            logit, h = self(x, h)
            proba = logit.softmax(dim=-1)
            if crover is not None:
                ratio = torch.rand(batch_size, 1)
                logit1, h1 = crover(x, h1)
                proba = proba * ratio + logit1.softmax(dim=-1) * (1 - ratio)
            if mutate is not None:
                logit2, h2 = mutate(x, h2)
                is_mutate = (torch.rand(batch_size) < epsilon)
                proba[is_mutate, :] = logit2.softmax(dim=-1)[is_mutate, :]
            # sampling based on output probability distribution
            x = torch.multinomial(proba, 1).view(-1)
            is_end |= x == self.voc.tk2ix["EOS"]
            x[is_end] = self.voc.tk2ix["EOS"]
            sequences[:, step] = x
            if is_end.all():
                break
        return sequences

    def evolve1(self, batch_size, epsilon=0.01, crover=None, mutate=None):
        """Variant of `evolve`: chooses this net or `crover` per step, then
        blends in `mutate`'s distribution with per-sample weight up to epsilon.
        """
        # Start tokens
        x = torch.LongTensor([self.voc.tk2ix["GO"]] * batch_size)
        # Hidden states initialization for exploitation network
        h = self.init_h(batch_size)
        # Hidden states initialization for exploration network
        h2 = self.init_h(batch_size)
        # Initialization of output matrix
        sequences = torch.zeros(batch_size, self.voc.max_len).long()
        # labels to judge and record which sample is ended
        is_end = torch.zeros(batch_size).bool()
        for step in range(self.voc.max_len):
            is_change = torch.rand(1) < 0.5
            if crover is not None and is_change:
                logit, h = crover(x, h)
            else:
                logit, h = self(x, h)
            proba = logit.softmax(dim=-1)
            if mutate is not None:
                logit2, h2 = mutate(x, h2)
                ratio = torch.rand(batch_size, 1) * epsilon
                proba = (
                    logit.softmax(dim=-1) * (1 - ratio) + logit2.softmax(dim=-1) * ratio
                )
            # sampling based on output probability distribution
            x = torch.multinomial(proba, 1).view(-1)
            x[is_end] = self.voc.tk2ix["EOS"]
            sequences[:, step] = x
            # Judging whether samples are end or not.
            end_token = x == self.voc.tk2ix["EOS"]
            is_end = torch.ge(is_end + end_token, 1)
            # If all of the samples generation being end, stop the sampling process
            if (is_end == 1).all():
                break
        return sequences

    def fit(
        self, loader_train, out: pathlib.Path, loader_valid=None, epochs=100, lr=1e-3
    ):
        """Maximum-likelihood training loop.

        Writes progress to `out` (".log" suffix) and checkpoints the best
        weights (".pkg" suffix) -- best by validation loss when a validation
        loader is given, otherwise by invalid-sample rate.
        """
        optimizer = optim.Adam(self.parameters(), lr=lr)
        best_error = np.inf
        # `with` guarantees the log file is closed even if training raises.
        with open(out.with_suffix(".log"), "w") as log:
            for epoch in range(epochs):
                for i, batch in enumerate(loader_train):
                    optimizer.zero_grad()
                    loss_train = -self.likelihood(batch).mean()
                    loss_train.backward()
                    optimizer.step()
                    if i % 10 == 0 or loader_valid is not None:
                        # Bug fix: the original sampled len(batch * 2)
                        # sequences, but len(batch * 2) == len(batch) for a
                        # tensor -- the intent is twice the batch size
                        # before deduplication.
                        seqs = self.sample(2 * len(batch))
                        # NOTE(review): `tensor_ops` is not imported in this
                        # notebook; confirm the intended unique-rows helper.
                        ix = tensor_ops.unique(seqs)
                        seqs = seqs[ix]
                        smiles, valids = self.voc.check_smiles(seqs)
                        error = 1 - sum(valids) / len(seqs)
                        info = "Epoch: %d step: %d error_rate: %.3f loss_train: %.3f" % (
                            epoch,
                            i,
                            error,
                            loss_train.item(),
                        )
                        if loader_valid is not None:
                            loss_valid, size = 0, 0
                            # Renamed to vbatch: the original shadowed the
                            # training `batch`, changing later sample sizes.
                            for j, vbatch in enumerate(loader_valid):
                                size += vbatch.size(0)
                                loss_valid += (
                                    -self.likelihood(vbatch).sum().item()
                                )
                            loss_valid = loss_valid / size / self.voc.max_len
                            if loss_valid < best_error:
                                torch.save(self.state_dict(), out.with_suffix(".pkg"))
                                best_error = loss_valid
                            info += " loss_valid: %.3f" % loss_valid
                        elif error < best_error:
                            torch.save(self.state_dict(), out.with_suffix(".pkg"))
                            best_error = error
                        print(info, file=log)
                        for k, smile in enumerate(smiles):
                            print("%d\t%s" % (valids[k], smile), file=log)
from torch.utils.data import DataLoader
from src.drugexr.config.constants import MODEL_PATH, PROC_DATA_PATH, TEST_RUN
from src.drugexr.data.preprocess import logger
from src.drugexr.data_structs.vocabulary import Vocabulary
from src.drugexr.models.generator import Generator
import pandas as pd
voc = Vocabulary(vocabulary_path=pathlib.Path(PROC_DATA_PATH / "chembl_voc.txt"))
out_dir = MODEL_PATH / "output/rnn"
BATCH_SIZE = 512
netP_path = out_dir / "lstm_chembl_R_dev"
netE_path = out_dir / "lstm_ligand_R_dev"
prior = Generator(voc, is_lstm=True)
# Train loop
chembl = pd.read_table(PROC_DATA_PATH / "chembl_corpus_DEV_1000.txt").Token
chembl = torch.LongTensor(voc.encode([seq.split(" ") for seq in chembl]))
chembl = DataLoader(chembl, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
prior.fit(chembl, out=netP_path, epochs=50)
```
| github_jupyter |
```
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import lightgbm as lgb
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
df=pd.read_excel('preprocessed_data.xlsx')
df.drop(df.loc[df['language']!='en'].index, inplace=True)
df.shape
c = df.group.value_counts()
c = c[c < 200].index
print(c)
print(df.group.nunique() - len(c)+1)
descending_order = df['group'].value_counts().sort_values(ascending=False).index
plt.subplots(figsize=(22,5))
#add code to rotate the labels
ax=sns.countplot(x='group', data=df,order=descending_order)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="right")
plt.tight_layout()
plt.show()
#convert all categories with less than 200 freq as 'Grouped_Assignment'
df['New_group'] = np.where(df.groupby('group')['group'].transform('size') < 200, 'Grouped_Assignment', df['group'])
descending_order = df['New_group'].value_counts().sort_values(ascending=False).index
plt.subplots(figsize=(25,6))
#add code to rotate the labels
ax=sns.countplot(x='New_group', data=df,order=descending_order)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="right")
plt.tight_layout()
plt.show()
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Flatten, Bidirectional, GlobalMaxPool1D, TimeDistributed, Reshape,Dot
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import *
from keras.initializers import Constant
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score
max_features = 10000
maxlen = 40
embedding_size = 200
# define params
#NUM_WORDS = 20000
#EMBEDDING_DIM = 300
#MAX_LEN = 100
train = df.sample(frac=0.60, random_state=99)
test = df.loc[~df.index.isin(train.index), :]
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(train['merged_description'])
X = tokenizer.texts_to_sequences(train['merged_description'])
X = pad_sequences(X, maxlen = maxlen)
y = pd.get_dummies(train['New_group'])
print("Number of Samples:", len(X))
print(X[0])
print("Number of Labels: ", y.shape[1])
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
print(list(word_index.keys())[:100])
VOCAB_SIZE = len(word_index) + 1
VOCAB_SIZE
EMBEDDING_FILE = 'glove.6B.200d.txt'
embeddings = {}
# Bug fix: the original iterated a bare open() handle that was never
# closed; a context manager releases the file deterministically.  The
# line was also split twice -- split once and reuse the parts.
with open(EMBEDDING_FILE, encoding='utf-8') as glove_file:
    for line in glove_file:
        parts = line.split(" ")
        word = parts[0]
        # Remaining fields are the embedding coefficients for `word`.
        embeddings[word] = np.asarray(parts[1:], dtype='float32')
print('Found %s word vectors.' % len(embeddings))
embedding_dim = 200
# Row i holds the vector for the token with index i; row 0 stays zero
# (reserved for padding by the tokenizer's 1-based indices).
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, index in word_index.items():
    if index > max_features:
        # Token is outside the tokenizer's vocabulary cap -- leave zeros.
        continue
    vector = embeddings.get(word)
    # Known words get their GloVe vector; unknown ones a random draw.
    embedding_matrix[index] = vector if vector is not None else np.random.randn(embedding_dim)
model = Sequential()
model.add(Embedding(len(word_index)+1,
embedding_dim,
embeddings_initializer=Constant(embedding_matrix),
input_length=maxlen,
trainable=True))
model.add(SpatialDropout1D(0.2))
model.add(Bidirectional(LSTM(64, recurrent_dropout=0.1,return_sequences=True)))
model.add(Bidirectional(LSTM(64, recurrent_dropout=0.1)))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu'))
model.add(Dense(7, activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
epochs = 20
batch_size = 128
history = model.fit(X, y, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
X_test = tokenizer.texts_to_sequences(test['merged_description'])
X_test = pad_sequences(X_test, maxlen=maxlen)
# Encode test labels to integer ids.  LabelEncoder sorts classes
# alphabetically, matching the pd.get_dummies column order used for
# training -- assumes every class occurs in `test`; TODO confirm.
le = preprocessing.LabelEncoder()
le.fit(test['New_group'])
y_test = le.transform(test['New_group'])
word_index_test = tokenizer.word_index
y_hat = np.argmax(model.predict(X_test), axis=-1)
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
# Bug fix: classification_report expects (y_true, y_pred); the original
# passed predictions first, which swaps precision and recall per class.
print(classification_report(y_test, y_hat))
model = Sequential()
model.add(Embedding(len(word_index)+1,
embedding_dim,
embeddings_initializer=Constant(embedding_matrix),
input_length=maxlen,
trainable=True))
model.add(SpatialDropout1D(0.2))
model.add(Bidirectional(LSTM(32, recurrent_dropout=0.1,return_sequences=True)))
model.add(Bidirectional(LSTM(32, recurrent_dropout=0.1)))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu'))
model.add(Dense(7, activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
epochs = 50
batch_size = 128
history = model.fit(X, y, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.1)
y_hat_50 = np.argmax(model.predict(X_test), axis=-1)
# Bug fix: classification_report takes (y_true, y_pred) in that order.
print(classification_report(y_test, y_hat_50))
seq_input = Input(shape=(maxlen,), dtype='int32')
embedded = Embedding(len(word_index)+1,
embedding_dim,
embeddings_initializer=Constant(embedding_matrix),
input_length=maxlen,
trainable=True)(seq_input)
embedded = Dropout(0.2)(embedded)
lstm1 = Bidirectional(LSTM(embedding_dim, return_sequences=True))(embedded)
lstm1 = Dropout(0.2)(lstm1)
lstm2 = Bidirectional(LSTM(embedding_dim, return_sequences=True))(lstm1)
lstm2 = Dropout(0.2)(lstm2)
att_vector = TimeDistributed(Dense(1))(lstm2)
att_vector = Reshape((maxlen,))(att_vector)
att_vector = Activation('softmax', name='attention_vec')(att_vector)
att_output = Dot(axes=1)([lstm2, att_vector])
fc = Dense(embedding_dim, activation='relu')(att_output)
output = Dense(7, activation='softmax')(fc)
model = Model(inputs=[seq_input], outputs=output)
model.summary()
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer='adam')
history = model.fit(X, y, epochs=30, batch_size=128, validation_split=0.1, shuffle=True, verbose=2)
y_hat_att = np.argmax(model.predict(X_test), axis=-1)
# Bug fix: classification_report takes (y_true, y_pred) in that order.
print(classification_report(y_test, y_hat_att))
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from keras.preprocessing import image
from PIL import Image
import os
import warnings
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
import cv2
from time import time
import matplotlib.style as style
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
warnings.filterwarnings('ignore')
```
# Data preparation
```
# !git clone https://github.com/MemphisMeng/posters.git
# Load movie metadata and keep only the columns this notebook uses.
movies = pd.read_csv('movies.csv', engine='python')
movies = movies[['id', 'title', 'genres']]
# genre is empty, for prediction
predict_set = movies[movies['genres'].isna()]
movies = movies[movies['genres'].notna()]
# Split the comma-separated genre string into a list of genre labels.
movies['genres'] = movies['genres'].apply(lambda s: [l for l in str(s).split(', ')])
movies['genres']
# movies' posters can not be found in the image set
for i in movies.index:
    if os.path.exists('posters/train/' + str(movies['id'][i]) + '.jpg') is False:
        movies.drop(i, inplace=True)
for i in predict_set.index:
    if os.path.exists('posters/test/' + str(predict_set['id'][i]) + '.jpg') is False:
        predict_set.drop(i, inplace=True)
# 80/20 train/validation split over movie ids and their genre lists.
X_train, X_val, y_train, y_val = train_test_split(movies['id'], movies['genres'], test_size=0.2, random_state=42)
print("Number of posters for training: ", len(X_train))
print("Number of posters for validation: ", len(X_val))
# Map ids to poster file paths. The exists() filter is a safeguard only —
# NOTE(review): if it ever removed an entry here, X_* and y_* would misalign,
# since the labels are not filtered in lockstep. Normally the drop loop above
# already removed all movies without posters, so nothing is filtered out.
X_train = [os.path.join('posters/train/', str(id)+'.jpg') for id in X_train if os.path.exists('posters/train/' + str(id) + '.jpg')]
X_val = [os.path.join('posters/train/', str(id)+'.jpg') for id in X_val if os.path.exists('posters/train/' + str(id) + '.jpg')]
X_train[:3]
```
# Preprocessing
```
# Convert label Series to plain lists for MultiLabelBinarizer.
y_train = list(y_train)
y_val = list(y_val)
y_train[:3]
print("Labels:")
# Multi-hot encode the genre lists; the label vocabulary is learned from y_train only.
mlb = MultiLabelBinarizer()
mlb.fit(y_train)
# Loop over all labels and show them
N_LABELS = len(mlb.classes_)
for (i, label) in enumerate(mlb.classes_):
    print("{}. {}".format(i, label))
# NOTE(review): transform() on y_val silently ignores genres never seen in
# y_train — verify the split leaves no unseen genres if that matters.
y_train_bin = mlb.transform(y_train)
y_val_bin = mlb.transform(y_val)
for i in range(3):
    print(X_train[i], y_train_bin[i])
IMG_SIZE = 224 # Specify height and width of image to match the input format of the model
CHANNELS = 3 # Keep RGB color channels to match the input format of the model
BATCH_SIZE = 256 # Big enough to measure an F1-score
AUTOTUNE = tf.data.experimental.AUTOTUNE # Adapt preprocessing and prefetching dynamically
SHUFFLE_BUFFER_SIZE = 1024 # Shuffle the training data by a chunk of 1024 observations
def parse_function(filename, label):
    """Load one poster image and return (normalized image tensor, label).

    Args:
        filename: string representing path to image
        label: 0/1 one-dimensional array of size N_LABELS
    """
    raw = tf.io.read_file(filename)
    # Decode JPEG bytes into a dense uint8 tensor with the expected channel count.
    img = tf.image.decode_jpeg(raw, channels=CHANNELS)
    img = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])
    # Scale pixel intensities from [0, 255] into [0.0, 1.0].
    return img / 255.0, label
def create_dataset(filenames, labels, is_training=True):
    """Build a batched, prefetched tf.data pipeline over poster images.

    Args:
        filenames: list of image paths
        labels: numpy array of shape (BATCH_SIZE, N_LABELS)
        is_training: boolean to indicate training mode
    """
    ds = tf.data.Dataset.from_tensor_slices((filenames, labels))
    # Decode/resize/normalize images in parallel.
    ds = ds.map(parse_function, num_parallel_calls=AUTOTUNE)
    if is_training:
        # Small dataset: keep it in memory after the first pass, then shuffle.
        ds = ds.cache().shuffle(buffer_size=SHUFFLE_BUFFER_SIZE)
    # Batch, and prefetch the next batch while the model trains on the current one.
    return ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
# Materialize the training and validation pipelines, then sanity-check one batch.
train_ds = create_dataset(X_train, y_train_bin)
val_ds = create_dataset(X_val, y_val_bin)
for f, l in train_ds.take(1):
    print("Shape of features array:", f.numpy().shape)
    print("Shape of labels array:", l.numpy().shape)
```
# Modeling
```
# Pretrained MobileNetV2 feature extractor from TF-Hub, with a small dense head on top.
feature_extractor_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4"
feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
                                         input_shape=(IMG_SIZE,IMG_SIZE,CHANNELS))
# feature_extractor_layer = tf.keras.applications.MobileNetV2(input_shape=(IMG_SIZE, IMG_SIZE, CHANNELS),
#                                          include_top=False, weights='imagenet')
feature_extractor_layer.trainable = False  # transfer learning: freeze the backbone weights
model = tf.keras.Sequential([
    feature_extractor_layer,
    layers.Dense(1024, activation='relu', name='hidden_layer'),
    # Sigmoid (not softmax): multi-label problem — each genre is scored independently.
    layers.Dense(N_LABELS, activation='sigmoid', name='output')
])
model.summary()
@tf.function
def macro_f1(y, y_hat, thresh=0.5):
    """Compute the macro F1-score on a batch of observations (average F1 across labels).
    Args:
        y (int32 Tensor): labels array of shape (BATCH_SIZE, N_LABELS)
        y_hat (float32 Tensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS)
        thresh: probability value above which we predict positive
    Returns:
        macro_f1 (scalar Tensor): value of macro F1 for the batch
    """
    # Hard predictions: probability above the threshold counts as a positive.
    preds = tf.cast(tf.greater(y_hat, thresh), tf.float32)
    # Per-label confusion counts, accumulated along the batch axis.
    true_pos = tf.cast(tf.math.count_nonzero(preds * y, axis=0), tf.float32)
    false_pos = tf.cast(tf.math.count_nonzero(preds * (1 - y), axis=0), tf.float32)
    false_neg = tf.cast(tf.math.count_nonzero((1 - preds) * y, axis=0), tf.float32)
    # The epsilon keeps the division finite for labels with no positives at all.
    per_label_f1 = 2*true_pos / (2*true_pos + false_neg + false_pos + 1e-16)
    return tf.reduce_mean(per_label_f1)
@tf.function
def macro_soft_f1(y, y_hat):
    """Compute the macro soft F1-score as a cost (average 1 - soft-F1 across all labels).
    Uses probability values instead of binary predictions so the loss stays differentiable.
    Args:
        y (int32 Tensor): targets array of shape (BATCH_SIZE, N_LABELS)
        y_hat (float32 Tensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS)
    Returns:
        cost (scalar Tensor): value of the cost function for the batch
    """
    targets = tf.cast(y, tf.float32)
    probs = tf.cast(y_hat, tf.float32)
    # "Soft" confusion counts: raw probabilities in place of hard 0/1 predictions.
    soft_tp = tf.reduce_sum(probs * targets, axis=0)
    soft_fp = tf.reduce_sum(probs * (1 - targets), axis=0)
    soft_fn = tf.reduce_sum((1 - probs) * targets, axis=0)
    per_label = 2*soft_tp / (2*soft_tp + soft_fn + soft_fp + 1e-16)
    # Minimizing (1 - soft F1) maximizes soft F1; average across all labels.
    return tf.reduce_mean(1 - per_label)
LR = 1e-5 # Keep it small when transfer learning
EPOCHS = 30
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=LR),
    loss=macro_soft_f1,   # differentiable surrogate for macro F1 (defined above)
    metrics=[macro_f1])
# model.compile(
#   optimizer=tf.keras.optimizers.Adam(lr=5e-4),
#   loss=tf.keras.metrics.binary_crossentropy,
#   metrics=[macro_f1])
start = time()
with tf.device('/device:GPU:0'):
    # NOTE(review): validation data is rebuilt here even though val_ds was
    # already created above, and the rebuilt dataset uses the default
    # is_training=True, so the validation split is cached/shuffled like
    # training data — confirm this is intended.
    history = model.fit(train_ds,
                        epochs=EPOCHS,
                        steps_per_epoch=70,
                        validation_data=create_dataset(X_val, y_val_bin),
                        validation_steps=70)
# NOTE(review): print_time is not defined anywhere in this notebook chunk —
# verify it exists in an earlier cell, otherwise this line raises NameError.
print('\nTraining took {}'.format(print_time(time()-start)))
def learning_curves(history):
    """Plot the learning curves of loss and macro F1-score
    for the training and validation datasets.
    Args:
        history: history callback of fitting a tensorflow keras model
    Returns:
        tuple of (loss, val_loss, macro_f1, val_macro_f1) lists from the history
    """
    hist = history.history
    train_loss, valid_loss = hist['loss'], hist['val_loss']
    train_f1, valid_f1 = hist['macro_f1'], hist['val_macro_f1']
    # x-axis: epoch numbers starting at 1.
    xs = range(1, len(train_loss) + 1)

    style.use("bmh")
    plt.figure(figsize=(8, 8))

    # Upper panel: loss curves.
    plt.subplot(2, 1, 1)
    plt.plot(xs, train_loss, label='Training Loss')
    plt.plot(xs, valid_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')

    # Lower panel: macro F1 curves.
    plt.subplot(2, 1, 2)
    plt.plot(xs, train_f1, label='Training Macro F1-score')
    plt.plot(xs, valid_f1, label='Validation Macro F1-score')
    plt.legend(loc='lower right')
    plt.ylabel('Macro F1-score')
    plt.title('Training and Validation Macro F1-score')
    plt.xlabel('epoch')

    plt.show()
    return train_loss, valid_loss, train_f1, valid_f1
# Render the curves and keep the raw per-epoch series for later inspection.
losses, val_losses, macro_f1s, val_macro_f1s = learning_curves(history)
```
# Prediction
```
def show_prediction(index, movies_df, model):
    """Predict genres for one test poster and display the image with its prediction."""
    # Get movie info
    imdbId = movies_df['id'].iloc[index]
    img_path = os.path.join('posters/test/', str(imdbId)+'.jpg')
    # Read and prepare image
    # NOTE(review): keras load_img conventionally takes a 2-tuple target_size;
    # a 3-tuple is passed here — confirm it behaves as intended in this version.
    img = image.load_img(img_path, target_size=(IMG_SIZE,IMG_SIZE,CHANNELS))
    img = image.img_to_array(img)
    img = img/255
    img = np.expand_dims(img, axis=0)  # add the batch dimension
    # Generate prediction: keep genres whose sigmoid score exceeds 0.6
    prediction = (model.predict(img) > 0.6).astype('int')
    prediction = pd.Series(prediction[0])
    prediction.index = mlb.classes_
    prediction = prediction[prediction==1].index.values
    # Display image with prediction
    style.use('default')
    plt.figure(figsize=(8,4))
    plt.imshow(Image.open(img_path))
    plt.title('\n{}\nGenre Prediction\n{}'.format(movies_df['title'].iloc[index], list(prediction)), fontsize=9)
    plt.show()

# examples
for i in range(5):
    try:
        show_prediction(i, predict_set, model)
    except FileNotFoundError:
        print('Poster of this movie is not available')
```
| github_jupyter |
# Introduction to Matplotlib (tutorial)
This notebook gives a short introduction to *Matplotlib*, Python's most popular package for plotting. Although many different plotting packages exist in the Python ecosystem (see [this talk](https://www.youtube.com/watch?v=FytuB8nFHPQ) for an overview), Matplotlib is arguably the most versatile and flexible. Here, we will give you a short tour of Matplotlib's most important features.
## Contents
1. The state-based approach
2. The object-oriented approach
3. Subplots (optional)
Most of the plotting functionality is contained in the subpackage `pyplot`, which is usually imported as follows:
```
import matplotlib.pyplot as plt # nice and short
```
Somewhat confusingly, Matplotlib has two interfaces for plotting: a state-based approach that mimics Matlab's way of plotting and a more "Pythonic" object-oriented approach. As Matplotlib recommends using the object-oriented approach, we will spend most time on this approach. But you'll often see the state-based approach as well, so we'll start with quickly discussing this approach.
## The state-based approach
As mentioned, the state-based approach is a lot like the way plotting is done in Matlab: you call different *functions* that each take care of an aspect of the plot. In Matplotlib, most of these functions are contained in the `pyplot` package. Let's create a simple line plot to show what the state-based approach looks like:
```
x = [0, 1, 2, 3, 4, 5, 6]
y = [1, 2, 3, 4, 5, 6, 7]
plt.plot(x, y) # plot the data
plt.xlabel('x', fontsize=25) # set the x-axis label
plt.ylabel('y', fontsize=25) # set the y-axis label
plt.show() # this actually visualizes the plot
```
As you can see, the state-based approach entails a series of function calls (such as `plt.plot` and `plt.xlabel`). After you are done plotting, you just call `plt.show` and the plot will show in your notebook (or an external image viewer if you run it from a script). Note that, technically, the `plt.show` call is not necessary to render the plot in Jupyter notebooks, but we recommend doing it anyway as this is good practice.
The `plt.plot` function is perhaps the most basic function, which can be used to create any plot of paired datapoints (x, y). By default, it creates a line plot (as shown above), but the many (optional) parameters in `plt.plot` allow you to create many different variations! For example, instead of a line, we can plot the data as separate red points by specifying the format in the third argument (here, 'o' to indicate points) and the color (by setting the argument `c`, for **c**olor, to "red"):
```
plt.plot(x, y, 'o', c='red')
plt.show()
```
<div class='alert alert-success'>
<b>Tip</b>: Note that the third argument, the "format", may be used to specify three things at once: whether you want "markers" (and which type of marker), whether you want a line (and which type of line), and which color the markers/line should have. So, to create red markers, you may specify "or" ("o" for circles as markers and "r" for red). To create a blue line, you may specify "-b". To create a yellow ("y") dotted line ("-.") with stars ("*") as markers, you may use "*-.y".
</div>
<div class='alert alert-warning'>
<b>ToDo</b>: Create the same plot as above, but with a green dashed line with diamonds as markers. Check out the <a href="https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html">plt.plot</a> documentation under "Notes" which particular linestyle you need for this! And check <a href="https://matplotlib.org/api/markers_api.html">this page</a> to see the name of the marker for diamonds! Make sure to write your code <b>above</b> the "<tt>ax2check = plt.gca()</tt>" snippet — we use this to check your plot automatically in the test cell! Also, do not include a <tt>plt.show</tt> call; this is done after the <tt>ax2check</tt> line (calling <tt>plt.show</tt> before <tt>ax2check</tt> will cause the test cell to malfunction).
</div>
```
""" Implement the ToDo here. """
### BEGIN SOLUTION
plt.plot(x, y, 'D--g')
### END SOLUTION
# Do not remove the code below!
ax2check = plt.gca()
plt.show()
""" Tests the ToDo above. """
line = ax2check.get_lines()[0]
if not line._color in ['g', 'green']:
raise ValueError("The line is not green!")
if line._linestyle != '--':
raise ValueError("You didn't use a dashed line!")
if line._marker._marker != 'D':
raise ValueError("You didn't use a dashed line!")
print("Yay! Well done.")
```
You can also plot multiple things within a single plot! Just call the `plt.plot` (or any other plotting function) multiple times. Below, we create a new variable (`y_sq`, the values of squared) and plot it in the same plot as our regular (x, y) plot. Importantly, we will include legend with the plot showing what each line represents using `plt.legend`:
```
y_sq = [yi ** 2 for yi in y] # check out this list comprehension!
plt.plot(x, y, '*b') # only plot markers (*) in blue
plt.plot(x, y_sq, '^--y') # plot both markers (^) and a line (--) in yellow
# Note that the plt.legend function call should come *after* the plotting calls
# and you should give it a *list* with strings
plt.legend(['y', 'y squared'])
plt.show()
```
<div class='alert alert-danger'>
<b>Warning</b>: Importantly, in the context of Jupyter notebooks, each part of the plot should be defined in the same code cell; otherwise, they won't be included in the same figure.
</div>
As you can see, Matplotlib automatically creates the right legend! Make sure that the order of your labels (here: `['y', 'y squared']`) matches the order of your plotting calls!
<div class='alert alert-success'>
<b>Good to know</b>: If you plot multiple things in the same plot, Matplotlib will automatically choose a different color for the different things (first one is blue, second one is orange, third one is green, etc.).
</div>
<div class='alert alert-warning'>
<b>ToDo</b>: Below, we define some new variables: <tt>z</tt>, the sine of z (<tt>sin_z</tt>), and the cosine of z (<tt>cos_z</tt>). Plot both the (<tt>z</tt>, <tt>sin_z</tt>) and (<tt>z</tt>, <tt>cos_z</tt>) collections as separate dashed lines in whatever (different) colors you like. Make sure to add a legend!
</div>
```
""" Implement your ToDo below. """
import math
z = [zi / 10 for zi in list(range(100))]
sin_z = [math.sin(zi) for zi in z]
cos_z = [math.cos(zi) for zi in z]
### BEGIN SOLUTION
plt.plot(z, sin_z)
plt.plot(z, cos_z)
plt.legend(["sin(z)", "cos(z)"])
### END SOLUTION
# Do not remove the code below and implement your code *above* this snippet
ax2check = plt.gca()
plt.show()
""" Tests the ToDo above. """
lines = ax2check.get_lines()
if len(lines) != 2:
raise ValueError(f"I expected 2 lines but found {len(lines)}!")
leg = ax2check.get_legend()
if leg is None:
raise ValueError("You didn't include a legend!")
n_leg = len(leg.get_lines())
if n_leg != 2:
raise ValueError("I expected two things in the legend but found {n_leg}!")
print("Good job!")
```
There are several other things that you can add to or tweak in your plot. For example, you can add a title with `plt.title` or you can change the default ticks and tick labels using `plt.xticks` (for the x-axis ticks/tick labels) and `plt.yticks` (for the y-axis ticks/tick labels). An example:
```
plt.title("Plot with modified x-axis ticks and tick labels!", fontsize=14)
plt.plot(x, y)
plt.xticks([0, 2, 4, 6], ['0th', '2nd', '4th', '6th'])
plt.show()
```
And you can control the range of the axes by the functions `plt.xlim` and `plt.ylim`:
```
plt.plot(x, y)
plt.xlim(-5, 12)
plt.ylim(-5, 12)
plt.show()
```
### Different plot functions
Of course, `plt.plot` is not the only plotting functions! There are many different plotting functions in Matplotlib, including scatterplots:
```
plt.title("A scatterplot!", fontsize=20)
# Note that this is equivalent to plt.plot(x, y, 'o') !
plt.scatter(x, y)
plt.show()
```
... and bar graphs:
```
# First argument determines the location of the bars on the x-axis
# and the second argument determines the height of the bars
plt.bar(x, x)
plt.show()
```
... and histograms:
```
# Let's generate some random data
import random
random_unif = [random.uniform(0, 1) for _ in range(100)]
plt.title("A histogram!", fontsize=20)
plt.hist(random_unif)
plt.xlabel("Value", fontsize=15)
plt.ylabel("Frequency", fontsize=15)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b>: Below, we again simulate some random data, but this time not from a uniform distribution, but from a normal distribution (with mean 0.5 and a standard deviation of 0.15). Plot both the uniform data (<tt>random_unif</tt>) and the normal data (<tt>random_norm</tt>) in the same plot, but with different colors (doesn't matter which). Make sure to use 10 bins for each histogram and make sure they are slightly transparent by setting the "alpha" level to 0.5 in both. Also, make sure the ticks and xtick labels are spaced 0.1 apart (i.e., a tick and label at 0, at 0.1, ..., until 1.0). And add a legend! To find out how to set the number of bins and the alpha level, check out the <a href="https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html">documentation</a>!
</div>
```
""" Implement the ToDo here! """
random_norm = [random.normalvariate(0.5, 0.15) for _ in range(100)]
import numpy as np
### BEGIN SOLUTION
plt.hist(random_unif, alpha=0.5, bins=10)
plt.hist(random_norm, alpha=0.5, bins=10)
plt.legend(['uniform', 'normal'])
plt.xlabel("Value", fontsize=15)
plt.xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Using numpy is also fine:
# plt.xticks(np.linspace(0, 1, 11))
plt.ylabel("Frequency", fontsize=15)
### END SOLUTION
# Do not remove the code below
ax2check = plt.gca()
plt.show()
""" Tests the ToDo above. """
import matplotlib
rect = [ch for ch in ax2check.get_children() if ch.__class__ == matplotlib.patches.Rectangle]
if len(rect) != 21:
raise ValueError(f"I expected 20 bins in total (10 per histogram), but found {len(rect) - 1}!")
if rect[0]._alpha != 0.5:
raise ValueError(f"The alpha level is not 0.5 (but {rect[0]._alpha})!")
leg = ax2check.get_legend()
if leg is None:
raise ValueError("I couldn't find a legend!")
if len(ax2check.get_xticks()) != 11:
raise ValueError("There should be 11 x-axis ticks and labels!")
xticks = ax2check.get_xticks()
if isinstance(xticks, np.ndarray):
xticks = [round(xt, 1) for xt in xticks.tolist()]
if not xticks == [i / 10 for i in list(range(11))]:
raise ValueError("The x-axis ticks and tick labels are not yet correct ...")
print("Awesome!")
```
## The object-oriented interface
The state-based plotting approach is easy to learn and pretty straightforward, but when you start creating more complex visualizations, you'll notice that the alternative "object-oriented" approach becomes easier to use. In this section, we will explain this approach by recreating some of the previous plots from the state-based section. We will also discuss some more advanced plotting techniques, such as creating subplots.
Now, within the object-oriented approach, we can explain some of the more technical (but important!) concepts. One of those is that each Matplotlib plot consists of a `Figure` object and one or more `Axes` objects. Essentially, the `Figure` object represents the entire canvas that defines the, well, figure. The `Axes` object(s) contains the actual visualizations that you want to include in the `Figure` (see figure below). Importantly, there may be one *or* multiple `Axes` object within a given `Figure` (e.g., two line plots next to each other).

*Figure from Brad Solomon, from [https://realpython.com/python-matplotlib-guide/](https://realpython.com/python-matplotlib-guide/)*
<div class='alert alert-danger'>
<b>Warning</b>: Note that an <tt>Axes</tt> object is something different than the x-axis and y-axis ("axes") of a plot!
</div>
Importantly, a `Figure` object by itself doesn't do anything. It just defines the canvas to drawn on, so to speak. `Figure` objects can be initialized using `plt.figure`, which takes several (optional) arguments like `figsize` (width and height in inches) and `dpi` ("dots per inch", i.e., resolution). Let's take a look:
```
fig = plt.figure(figsize=(8, 4))
plt.show()
```
As you can see, nothing happens. We also need an `Axes` object! We can create this using `plt.axes`. Note that, even in the object-oriented appoach, we need the function `plt.show` to render the figure.
```
fig = plt.figure(figsize=(8, 4))
ax = plt.axes()
plt.show()
```
Instead of creating the `Figure` and `Axes` objects separately, we highly recommend using the function `plt.subplots` to create them both at the same time. Like the name suggests, this function also allows you to create multiple subplots (across different `Axes`), which we'll discuss later. For now, we'll just use it to create a `Figure` and `Axes` object at once. Note that all arguments for creating `Figures` using `plt.figure` also work for `plt.subplots`. For example, you can give `plt.subplots` the arguments `figsize=(8, 4)` and `dpi=200` (these must be specified with keywords, however)! Check out the [full documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html) of `plt.subplots` to get an idea about the different arguments it accepts.
Anyway, let's take a look:
```
fig, ax = plt.subplots(figsize=(8, 4))
plt.show()
```
Alright, great, but it's still an empty canvas! Now, we could of course plot some data using the state-based interface (e.g., `plt.plot(x, y)`). Here, however, we will use the object-oriented approach. The only difference between these two approaches is that plotting in the object-oriented approach is done through the *methods* of the `Axes` object instead of the functions from the `pyplot` module. An example:
```
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(x, y) # here, `plot` is a method, not a function!
plt.show()
```
Basically all functions from the state-based interface are available as methods in the object-oriented approach. For example, to create a legend, run `ax.legend` (instead of `plt.legend`):
```
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(x, y)
ax.plot(x, y_sq)
ax.legend(['y', 'y squared'])
plt.show()
```
Some `pyplot` functions (like `plt.xlabel`), however, are prefixed with `set_` in the object-oriented interface (e.g., `ax.set_xlabel`):
```
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(x, y)
ax.set_xlabel('x', fontsize=15)
ax.set_ylabel('y', fontsize=15)
ax.set_title("Some plot", fontsize=20)
plt.show()
```
Okay, time for an exercise!
<div class='alert alert-warning'>
<b>ToDo</b>: Below, using a so-called "random walk", we create some random numbers representing two time series. Let's pretend that this data represents the stock price of two companies (Tesla and Shell) across 100 consecutive days. Create a <tt>Figure</tt> of 12 (width) by 4 (height) inches and a single <tt>Axes</tt> onto which you plot this time series data (as lines). Label the axes appropriately and set the range of the x-axis from 1 to 100.
</div>
```
""" Implement the ToDo here. """
steps1 = [random.uniform(-1, 1) for _ in range(100)]
steps2 = [random.uniform(-1, 1) for _ in range(100)]
tesla = [40 + sum(steps1[:i]) for i in range(100)]
shell = [35 + sum(steps2[:i]) for i in range(100)]
days = list(range(1, 101))
### BEGIN SOLUTION
fig, ax = plt.subplots(figsize=(15, 4))
ax.plot(days, tesla, '-')
ax.plot(days, shell, '-')
ax.set_xlim(1, 100)
ax.set_xlabel('Time (days)', fontsize=15)
ax.set_ylabel('Price', fontsize=15)
ax.legend(['Tesla', 'Shell'])
### END SOLUTION
# Do not remove the code below
ax2check = plt.gca()
plt.show()
""" Tests the above ToDo. """
lines = ax2check.get_lines()
if len(lines) != 2:
raise ValueError(f"I expected two lines, but I saw {len(lines)}!")
if ax2check.get_legend() is None:
raise ValueError("There is no legend!")
if not ax2check.get_xlabel():
raise ValueError("There is no label for the x-axis!")
if not ax2check.get_ylabel():
raise ValueError("There is no label for the y-axis!")
print("YES! Well done!")
```
Okay, one last thing we want to show you is how to save figures to disk! In the object-oriented interface, you can save figures using the `Figure` method `savefig` (check out its [documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html)). The only mandatory argument is a filename, including an extension. The extension determines as which file type the figure is saved. If you want to save a figure as a PNG file, you can for example do the following:
```
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(x, y)
plt.show()
fig.savefig('my_awesome_figure.png')
```
This actually created a new file, "my_awesome_figure.png", in our current directory. We can double-check this using the command `!ls`, which is some Jupyter magic that allows us to use a code cell as a terminal temporarily:
```
!ls
```
## Subplots (optional)
In this optional section, we will discuss figures with "subplots", i.e., figures with more than one `Axes` object! The easiest way to do this is to use the `plt.subplots` function. This function accepts the arguments `ncols` and `nrows` to create a figure with multiple `Axes` next to each other (`ncols` > 1) or below each other (`nrows` > 1). For example, suppose that I want to create a figure with three plots next to each other:
```
# Note that I use the variable name "axes" here instead of "ax" like before
# This is not necessary, but I find it helpful because it tells me this variable contains
# more than one axis
fig, axes = plt.subplots(ncols=3, figsize=(15, 4))
plt.show()
```
The variable `axes` is slightly different from what we've seen before. Let's check out its type:
```
type(axes)
```
When you create a figure with more than one `Axes` object, the function `plt.subplots` returns a so-called *numpy* ndarray with `Axes` ("ndarray" stands for *N*-dimensional array). Numpy arrays are data structures that we discuss at length in the last (optional) notebook of this week. For now, you can interpret numpy arrays as (in this case) one or two-dimensional lists. To access the individual `Axes` objects from the numpy array, we can index them as if they are lists. For example:
```
first_ax = axes[0]
```
Now, let's plot some stuff in our different `Axes` objects.
```
fig, axes = plt.subplots(ncols=3, figsize=(15, 4))
axes[0].plot(x, y)
axes[1].plot(x, [yi ** 2 for yi in y])
axes[2].plot(x, [yi ** 3 for yi in y])
names = ['y', 'y squared', 'y cubed']
for i, name in enumerate(names):
axes[i].set_title(name, fontsize=20)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b>: As you probably have noticed by now, if you don't explicitly give Matplotlib a range for the axes (using <tt>xlim</tt> and <tt>ylim</tt>), it will chose a suitable range itself, which results in separate ranges for the subplots in the above figure. To force the same range across subplots, set the arguments <tt>sharex</tt> and/or <tt>sharey</tt> to <tt>True</tt> in the <tt>plt.subplots</tt> call. Do this in the previous code cell to force the same range across the y-axes!
</div>
We can, of course, create figures with multiple columns *and* multiple rows. For example, to create a 2 by 2 grid of `Axes`, we can do the following:
```
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(5, 5))
plt.show()
```
<div class='alert alert-success'>
<b>Tip</b>: Sometimes, like in the figure above, subplots may overlap slightly, especially in small figures. Matplotlib has a neat function to fix this: <tt>plt.tight_layout</tt>. Try adding it to the code cell above (after the <tt>plt.subplots</tt> line but before the <tt>plt.show()</tt> line).
</div>
Currently, the `axes` variable is a two-dimensional numpy array (because it has both multiple columns and multiple rows). We can double-check this by checking out the `shape` attribute from the numpy array:
```
axes.shape
```
Now, to access the individual `Axes` objects from this numpy array, we need two indices: one to indicate the row and one to indicate the column. For example, to get the upper left `Axes` object (i.e., first row, first column), we do:
```
upper_left_ax = axes[0, 0]
```
To get the upper right `Axes` object (i.e., first row, second column), we do:
```
upper_right_ax = axes[0, 1]
```
<div class='alert alert-warning'>
<b>ToDo</b>: Extract the lower right <tt>Axes</tt> object and store it in a variable named <tt>lower_right_ax</tt>
</div>
```
""" Implement the ToDo here. """
### BEGIN SOLUTION
lower_right_ax = axes[1, 1]
### END SOLUTION
""" Tests the above ToDo. """
if lower_right_ax != axes.flatten()[-1]:
raise ValueError("That is not the correct Axes object ...")
print("Well done!")
```
Alright, there is not much more to subplots than we explained here! Let's finish with a difficult exercise for those that want a challenge. From trigonometry, you may remember how to create a sine wave with a particular amplitude and frequency. Below, we included a function, `create_sine_wave`, which takes in a list of timepoints, a desired frequency, and a desired amplitude:
```
def create_sine_wave(timepoints, frequency=1, amplitude=1):
    """ Creates a sine wave with a given frequency and amplitude for a given set of timepoints.
    Parameters
    ----------
    timepoints : list
        A list with timepoints (assumed to be in seconds)
    frequency : int/float
        Desired frequency (in Hz.)
    amplitude : int/float
        Desired amplitude (arbitrary units)
    Returns
    -------
    sine : list
        A list with floats representing the sine wave
    """
    # Angular frequency (radians per second) is constant across timepoints,
    # so compute it once before evaluating the wave.
    angular = 2 * math.pi * frequency
    return [amplitude * math.sin(angular * t) for t in timepoints]
```
Given some timepoints, we can plot its corresponding sine wave:
```
timepoints = [i / 100 for i in range(500)]
sine = create_sine_wave(timepoints)
fig, ax = plt.subplots(figsize=(8, 2))
ax.plot(timepoints, sine)
ax.set_xlabel("Time")
ax.set_ylabel("sin(x)")
ax.set_xlim(0, max(timepoints))
plt.show()
```
We already created a quite complicated figure with 9 subplots (3 rows, 3 columns), which shows a sine wave with increasing frequencies (1, 3, 5) across columns and increasing amplitudes across rows (1, 2, 4). We'll show this figure below:

<div class='alert alert-warning'>
<b>ToDo</b>: Try to recreate the figure above with your own code! Use a <tt>figsize</tt> of (10, 10). Good luck! (No test cell)
</div>
```
""" Implement your ToDo here. """
### BEGIN SOLUTION
fig, axes = plt.subplots(ncols=3, nrows=3, figsize=(10, 10), sharex=True, sharey=True)
amps = [1, 2, 4]
freqs = [1, 3, 5]
for i in range(len(amps)):
for ii in range(len(freqs)):
sine = create_sine_wave(timepoints, frequency=freqs[ii], amplitude=amps[i])
axes[i, ii].plot(timepoints, sine)
axes[i, ii].set_title(f"Freq = {freqs[ii]}, amp = {amps[i]}")
axes[i, ii].set_xlim(0, max(timepoints))
if ii == 0:
axes[i, ii].set_ylabel("Activity", fontsize=12)
if i == 2:
axes[i, ii].set_xlabel("Time", fontsize=12)
fig.tight_layout()
plt.show()
fig.savefig('solution_sine_wave_plot.png')
### END SOLUTION
```
| github_jupyter |
# Anatomy of an MD simulation script File
Different simulation packages often use different languages and have different syntax. For example, HOOMD-Blue uses a Python based interface, LAMMPS uses its own custom scripting language, and GROMACS relies upon an input file to define parameters and methods. However, despite these differences they all generally require the same information to be passed to the code
### Basic components of most script/input files
#### system/code initialization
>box size, particle types, particle initial positions
#### interaction definition
>how do the different species interact with each other
#### integrator setup
>what algorithm will we use to advance particles in time, time step of integration, thermodynamic state point (i.e., T or P).
#### runtime parameters
>total simulation time, which quantities to output and how frequently
## Basic HOOMD-Blue script file
As an example of defining each of these various components, consider setting a very simple simulation consisting of spheres that interact via the Lennard-Jones potential. This exercise is based on the following tutorial created by HOOMD developer Josh Anderson:
http://nbviewer.jupyter.org/github/joaander/hoomd-examples/blob/master/Tutorial%20-%20MD%20-%20Lennard%20Jones.ipynb
### Initialization
HOOMD-Blue uses a Python interface, so we must first import the relevant library and functions.
```
import hoomd
import hoomd.md
```
Next we must specify the 'execution context' to tell the code whether to run on the GPU and CPU. Note, by default HOOMD-Blue will run on the GPU if a compatible one is available, unless otherwise specified via the command line options or by passing an argument to the context initializer.
```
hoomd.context.initialize("")
```
Note, one can pass arguments to the initialize function (see [the documentation](https://hoomd-blue.readthedocs.io/en/stable/module-hoomd-context.html)) to, e.g., specify that it run on the CPU with a set number of threads,
```context.initialize("--mode=cpu --nthreads=64")```
Particle positions next need to be specified. HOOMD includes a few helper functions, primarily for the purposes of benchmarking, that allow simple systems to be defined. Note, in most cases, you will specify particle positions in a separate file (using the [GSD](http://gsd.readthedocs.io/en/stable/) format) and import this into hoomd (see [the documentation](https://hoomd-blue.readthedocs.io/en/stable/module-hoomd-init.html)). Here we will create an 'n' by 'n' by 'n' lattice of particles, with 'n'=5.
```
hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=2.0), n=5)
```
Note, by default, these particles will be labeled as type "A".
### Interaction Definition
Next we will define how particles interact. In this case, we will consider all interactions to be of type Lennard-Jones. In HOOMD, when defining a pair potential, we must also pass a neighborlist. Note, HOOMD-Blue supports several different types of neighborlists that will be discussed in detail later. Here, we will specify a 'cell'-based neighborlist (```nl```) and define the Lennard-Jones pair potential (```lj```), with a cutoff of 2.5.
```
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=2.5, nlist=nl)
```
Next we need to specify the pair coefficients, i.e., epsilon and sigma for the LJ interaction, for each pair of particle types in the system. Since we only have a single type in our system ('A'), we need only define a single pair. Note, if you fail to define the interactions, a useful error message will be provided when you try to run the simulation:
```
**ERROR**: Type pair ('A', 'A') not found in pair coeff
**ERROR**: Not all pair coefficients are set
```
```
lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
```
### Integrator Setup
To actually move particles through time, we will specify the timestep of integration (i.e., our time resolution for the numerical integration):
```
hoomd.md.integrate.mode_standard(dt=0.005)
```
Next we specify the integration scheme and which particles it will apply to. Note, in most codes, users do not explicitly specify the underlying algorithm for numerical integration (e.g., Velocity-Verlet), as this is implicitly defined when selecting the larger integration scheme (i.e., selecting a thermostatting scheme).
Below, we create a [group](https://hoomd-blue.readthedocs.io/en/stable/module-hoomd-group.html) named "all" (that includes all particles) and use the [Langevin](https://hoomd-blue.readthedocs.io/en/stable/module-md-integrate.html?highlight=langevin) method:
```
all = hoomd.group.all();
hoomd.md.integrate.langevin(group=all, kT=0.2, seed=42);
```
### Runtime parameters
It is not typically useful to run a simulation without logging the thermodynamic quantities and structure. Here we define a log file using the [analyze function](https://hoomd-blue.readthedocs.io/en/stable/module-hoomd-analyze.html) to output specific thermodynamic quantities and the frequency for outputting them:
```
hoomd.analyze.log(filename="log-output.log",
quantities=['potential_energy', 'temperature'],
period=100,
overwrite=True)
```
Similarly, we define the name and frequency for outputting a trajectory using the [dump function](https://hoomd-blue.readthedocs.io/en/stable/module-hoomd-dump.html). Note that DCD trajectories can also be written, however, GSD files contain the necessary information to restart a simulation.
```
hoomd.dump.gsd("trajectory.gsd", period=2e3, group=all, overwrite=True)
```
Finally, we must specify the time period to run. Note, in HOOMD the system will begin running as soon as the run time is defined. A HOOMD script can have multiple calls to ```run```, as will be discussed later.
```
hoomd.run(1e4)
```
### Full script
```
# Complete HOOMD-Blue Lennard-Jones script, assembled from the snippets above.
import hoomd
import hoomd.md
# Execution context; empty string means use HOOMD's default device selection
hoomd.context.initialize("");
# 5x5x5 simple-cubic lattice of type-'A' particles, lattice constant a=2.0
hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=2.0), n=5)
# Cell-based neighbor list feeding a Lennard-Jones pair potential (cutoff 2.5)
nl = hoomd.md.nlist.cell();
lj = hoomd.md.pair.lj(r_cut=2.5, nlist=nl);
lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0);
# Standard integration mode with timestep dt=0.005
hoomd.md.integrate.mode_standard(dt=0.005)
# Langevin dynamics at kT=0.2 applied to every particle
# NOTE: 'all' shadows Python's built-in all() — kept for consistency with the tutorial
all = hoomd.group.all();
hoomd.md.integrate.langevin(group=all, kT=0.2, seed=42);
# Log energy/temperature every 100 steps; dump a GSD trajectory frame every 2000 steps
hoomd.analyze.log(filename="log-output.log",
                  quantities=['potential_energy', 'temperature'],
                  period=100,
                  overwrite=True)
hoomd.dump.gsd("trajectory.gsd", period=2e3, group=all, overwrite=True)
# Run for 10,000 timesteps (the run starts as soon as this line executes)
hoomd.run(1e4)
```
### Examining the log
The log file generated by hoomd can be easily plotted in matplotlib by using the ```genfromtxt``` function to read in the data. Similar to matlab, individual columns can be separated using the syntax ```data[:,column_num]```where column_num would vary between 0 and 2 in this case as each line in the data file is formatted as: ```time potential_energy temperature```. The plot below could easily be changed to output the temperature as a function of time by changing ```data[:,1]``` to ```data[:,2]```
```
%%bash
###output the first 10 lines of the datafile
cat log-output.log | head -n 10

import numpy
from matplotlib import pyplot
%matplotlib inline
# Columns of the log file are: timestep, potential_energy, temperature
data = numpy.genfromtxt(fname='log-output.log', skip_header=True);
pyplot.figure(figsize=(4,2.2), dpi=140);
# Potential energy vs. time step; use data[:,2] instead for temperature
pyplot.plot(data[:,0], data[:,1]);
pyplot.xlabel('time step');
pyplot.ylabel('potential_energy');
```
| github_jupyter |
# Clique Method Robustness Verification for Tree Ensembles and Gradient Boosted Decision Tree Classifiers
```
from xgboost import XGBClassifier
import lightgbm
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from art.classifiers import XGBoostClassifier, LightGBMClassifier, SklearnClassifier
from art.utils import load_dataset
from art.metrics import RobustnessVerificationTreeModelsCliqueMethod
import warnings
warnings.filterwarnings('ignore')

# Keep the verification runtime manageable: use only 100 train/test examples
NB_TRAIN = 100
NB_TEST = 100

(x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
n_classes = 10
n_features = 28 * 28  # MNIST images are 28x28 pixels
n_train = x_train.shape[0]
n_test = x_test.shape[0]
# Flatten each image into a 784-dimensional feature vector for the tree models
x_train = x_train.reshape((n_train, n_features))
x_test = x_test.reshape((n_test, n_features))
# Truncate to the first NB_TRAIN / NB_TEST examples
x_train = x_train[:NB_TRAIN]
y_train = y_train[:NB_TRAIN]
x_test = x_test[:NB_TEST]
y_test = y_test[:NB_TEST]
```
# XGBoost
```
# Small XGBoost ensemble: 4 boosting rounds, trees of depth <= 6.
# np.argmax(y_train, axis=1) recovers integer class ids from the label rows
# (labels appear to be one-hot encoded by load_dataset — see argmax usage).
model = XGBClassifier(n_estimators=4, max_depth=6)
model.fit(x_train, np.argmax(y_train, axis=1))

# Wrap the fitted model in ART's classifier interface so it can walk the trees
classifier = XGBoostClassifier(model=model, nb_features=n_features, nb_classes=n_classes)
rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
# eps_init is the starting perturbation budget; 10 search steps refine the bound
# (see the ART docs for max_clique / max_level semantics)
average_bound, verified_error = rt.verify(x=x_test, y=y_test, eps_init=0.3, nb_search_steps=10, max_clique=2,
                                          max_level=2)
print('Average bound:', average_bound)
print('Verified error at eps:', verified_error)
```
# LightGBM
```
# Build LightGBM datasets; labels converted from rows to class ids via argmax
train_data = lightgbm.Dataset(x_train, label=np.argmax(y_train, axis=1))
test_data = lightgbm.Dataset(x_test, label=np.argmax(y_test, axis=1))

# Small multiclass GBDT configuration; verbose=0 keeps training output quiet
parameters = {'objective': 'multiclass',
              'num_class': n_classes,
              'metric': 'multi_logloss',
              'is_unbalance': 'true',
              'boosting': 'gbdt',
              'num_leaves': 5,
              'feature_fraction': 0.5,
              'bagging_fraction': 0.5,
              'bagging_freq': 0,
              'learning_rate': 0.05,
              'verbose': 0}
# Only 2 boosting rounds; early stopping monitors the test split
model = lightgbm.train(parameters,
                       train_data,
                       valid_sets=test_data,
                       num_boost_round=2,
                       early_stopping_rounds=10)

classifier = LightGBMClassifier(model=model)
rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
# Same verification settings as the XGBoost cell for comparability
average_bound, verified_error = rt.verify(x=x_test, y=y_test, eps_init=0.3, nb_search_steps=10, max_clique=2,
                                          max_level=2)
print('Average bound:', average_bound)
print('Verified error at eps:', verified_error)
```
# GradientBoosting
```
# Same verification procedure with scikit-learn's GradientBoostingClassifier
model = GradientBoostingClassifier(n_estimators=4, max_depth=6)
model.fit(x_train, np.argmax(y_train, axis=1))

# Generic sklearn wrapper (no per-library classifier needed here)
classifier = SklearnClassifier(model=model)
rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
average_bound, verified_error = rt.verify(x=x_test, y=y_test, eps_init=0.3, nb_search_steps=10, max_clique=2,
                                          max_level=2)
print('Average bound:', average_bound)
print('Verified error at eps:', verified_error)
```
# RandomForest
```
# Same verification procedure with a RandomForestClassifier (bagged trees)
model = RandomForestClassifier(n_estimators=4, max_depth=6)
model.fit(x_train, np.argmax(y_train, axis=1))

classifier = SklearnClassifier(model=model)
rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
average_bound, verified_error = rt.verify(x=x_test, y=y_test, eps_init=0.3, nb_search_steps=10, max_clique=2,
                                          max_level=2)
print('Average bound:', average_bound)
print('Verified error at eps:', verified_error)
```
# ExtraTrees
```
# Same verification procedure with an ExtraTreesClassifier (randomized splits)
model = ExtraTreesClassifier(n_estimators=4, max_depth=6)
model.fit(x_train, np.argmax(y_train, axis=1))

classifier = SklearnClassifier(model=model)
rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)
average_bound, verified_error = rt.verify(x=x_test, y=y_test, eps_init=0.3, nb_search_steps=10, max_clique=2,
                                          max_level=2)
print('Average bound:', average_bound)
print('Verified error at eps:', verified_error)
```
| github_jupyter |
### Custom data generator for loading video data for action recognition
```
import pandas as pd
import cv2
import numpy as np
from sklearn.utils import shuffle
import os
from collections import deque
import copy
import matplotlib
import matplotlib.pyplot as plt
from keras.utils import np_utils
from config import Config
%matplotlib inline
```
#### A helper function for loading the samples in the format of
[[[frame1_filename,frame2_filename,…],label1], [[frame1_filename,frame2_filename,…],label2],……….]
```
# reading the video files from the csv file
def file_generator(data_path,data_files,temporal_stride=1,temporal_length=16):
'''
data_files - list of csv files to be read.
'''
for f in data_files: # read all the csv files (one csv file corresponds to one vdieo) in data_files one by one
tmp_df = pd.read_csv(os.path.join(data_path,f))
label_list = list(tmp_df['Label']) # Load all the labels in the label_list
total_images = len(label_list)
if total_images>=temporal_length: # only if the number of frames in the video is greater tha temporal length, use that video
num_samples = int((total_images-temporal_length)/temporal_stride)+1
print ('num of samples from vid seq-{}: {}'.format(f,num_samples))
img_list = list(tmp_df['FileName'])
else: # if the number of frames are less than temporal length , discard it
print ('num of frames is less than temporal length; hence discarding this file-{}'.format(f))
continue
start_frame = 0
samples = deque() # initliaze a queue to store the frames
samp_count=0 # a counter to count the number of smaple. one smaple has as many frames as defined by temporal length
for img in img_list:
samples.append(img)
if len(samples)==temporal_length: #if the queue has as many frames as temporal length, return it as one sample
samples_c=copy.deepcopy(samples) # copy the queue as in the next stage frames would be popped
samp_count+=1
for t in range(temporal_stride): # pop out as many frames as described by the stride from the left to accomodate new frames
samples.popleft()
yield samples_c,label_list[0] # return a sample(consisting of as many frames as defined by temporal length)
# and its corsponding label
```
#### A load function for loading the samples in the format of
[[[frame1_filename,frame2_filename,…],label1], [[frame1_filename,frame2_filename,…],label2],……….]
```
# Load the samples and their corresponding label for each video
def load_samples(data_cat='train',temporal_stride=1,temporal_length=16):
data_path = os.path.join('data_files',data_cat)
data_files = os.listdir(data_path)
# define a generator to read the samples
file_gen = file_generator(data_path,data_files,temporal_stride,temporal_length)
iterator = True
data_list = []
while iterator:
try:
x,y = next(file_gen)
x=list(x)
data_list.append([x,y])
except Exception as e:
print ('the exception: ',e)
iterator = False
print ('end of data generator')
return data_list
```
#### load the train data
```
# Build training samples: 16-frame windows advanced 4 frames at a time
train_data = load_samples(data_cat='train',temporal_stride=4,temporal_length=16)
print ('Total number of train samples:',len(train_data))
# Inspect a few samples: each is [list_of_16_frame_paths, label]
train_data[0]
train_data[5000:5002]
```
#### Load the test data
```
# Test samples use stride 4 with the default temporal_length of 16
test_data = load_samples(data_cat='test',temporal_stride=4)
len(test_data)
```
#### Shuffle the dataset
```
def shuffle_data(samples):
    """Return *samples* shuffled with a fixed seed for reproducibility."""
    return shuffle(samples, random_state=2)
def preprocess_image(img):
    """Resize a frame to 224x224 and scale pixel values into [0, 1]."""
    resized = cv2.resize(img, (224, 224))
    return resized / 255
def data_generator(data, batch_size=10, temporal_padding='same', shuffle=True, num_classes=3):
    """
    Yields the next training batch, forever.

    data - list shaped [[[img1_filename, ..., img16_filename], label1], ...].
    batch_size - number of samples per yielded batch.
    temporal_padding - unused; kept for backward compatibility.
    shuffle - when True, shuffle the sample order once up front.
    num_classes - number of activity classes for one-hot encoding
                  (previously hard-coded to 3).

    Yields (X, y): X stacks the preprocessed frames of each sample and y is
    the one-hot encoded label array.
    """
    num_samples = len(data)
    if shuffle:
        data = shuffle_data(data)
    while True:  # loop forever: Keras-style generators are re-read each epoch
        for offset in range(0, num_samples, batch_size):
            print('startring index: ', offset)
            # Get the samples you'll use in this batch
            batch_samples = data[offset:offset + batch_size]
            # Initialise X_train and y_train arrays for this batch
            X_train = []
            y_train = []
            for batch_sample in batch_samples:
                frame_paths = batch_sample[0]  # list of frame filenames
                label = batch_sample[1]        # integer class label
                temp_data_list = []
                for frame_path in frame_paths:
                    try:
                        frame = cv2.imread(frame_path)
                        # apply any kind of preprocessing here
                        frame = preprocess_image(frame)
                        temp_data_list.append(frame)
                    except Exception as e:
                        # best effort: skip unreadable frames but keep the sample.
                        # (Report the file path, not the possibly-reassigned
                        # image object as the original did.)
                        print(e)
                        print('error reading file: ', frame_path)
                X_train.append(temp_data_list)
                y_train.append(label)
            # Make sure they're numpy arrays (as opposed to lists)
            X_train = np.array(X_train)
            y_train = np.array(y_train)
            # convert to one hot encoding for training a keras model
            y_train = np_utils.to_categorical(y_train, num_classes)
            # yield the next training batch
            yield X_train, y_train
```
#### create a generator object with training data
```
# Instantiate the generator and pull a single batch to sanity-check shapes
train_generator = data_generator(train_data,batch_size=4,shuffle=True)
x,y = next(train_generator)
# Expect x: (batch, frames, height, width, channels); y: (batch, classes) — verify below
print ('x shape: ',x.shape)
print ('y shape: ',y.shape)
y
```
#### Let's visualize the first sample
```
# Pick the third element of the batch and decode its label
x_0=x[2]
y_0=y[2]
print('x_0 shape: ',x_0.shape)
print('y_0 shape: ',y_0.shape)
# Mapping from one-hot index back to a human-readable activity name
Config.labels_to_class
activity = Config.labels_to_class[np.argmax(y_0)]
activity
```
#### Plot the first sample
```
# Plot all 16 frames of the selected sample on a square grid
num_of_images=16
fig=plt.figure(figsize=(8,8))
plt.title("one sample with {} frames ; activity:{}".format(num_of_images,activity))
# Grid side length: smallest integer whose square holds num_of_images
subplot_num = int(np.ceil(np.sqrt(num_of_images)))
for i in range(int(num_of_images)):
    ax = fig.add_subplot(subplot_num, subplot_num, i+1)
    #ax.imshow(output_image[0,:,:,i],interpolation='nearest' ) #to see the first filter
    # ::-1 reverses the channel axis (frames were read with OpenCV, i.e. BGR)
    ax.imshow(x_0[i,:,:,::-1])
    plt.xticks([])
    plt.yticks([])
plt.tight_layout()
plt.show()
```
| github_jupyter |
<a href ="https://colab.research.google.com/github/GEM-benchmark/NL-Augmenter/blob/main/notebooks/Write_a_sample_transformation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# NL-Augmenter Colab example
* Play with an existing **transformation**
* Write your own **transformation**
* Play with an existing **filter**
* Write your own **filter**
Total running time: ~10 min
## Install NL-Augmenter from GitHub
```
!git clone https://www.github.com/GEM-benchmark/NL-Augmenter
cd NL-Augmenter
!pip install -r requirements.txt --quiet
```
## Load modules
```
from transformations.butter_fingers_perturbation.transformation import ButterFingersPerturbation
from transformations.change_person_named_entities.transformation import ChangePersonNamedEntities
from transformations.replace_numerical_values.transformation import ReplaceNumericalValues
from interfaces.SentenceOperation import SentenceOperation
from interfaces.QuestionAnswerOperation import QuestionAnswerOperation
from evaluation.evaluation_engine import evaluate, execute_model
from tasks.TaskTypes import TaskType
```
## Play with some existing transformations
```
# Request up to 3 perturbed variants of the sentence
t1 = ButterFingersPerturbation(max_outputs=3)
t1.generate("Jason wants to move back to India by the end of next year.")
# Up to 2 variants with person named entities swapped
t2 = ChangePersonNamedEntities(max_outputs=2)
t2.generate("Jason wants to move back to India by the end of next year.")
# A single variant with numerical values replaced
t3 = ReplaceNumericalValues(max_outputs=1)
t3.generate("Jason's 3 sisters want to move back to India")
```
## Define a simple transformation
Let's define a very basic transformation which just uppercases the sentence.
This transformation could be used for many [tasks](https://github.com/GEM-benchmark/NL-Augmenter/blob/add_filters_for_contrast_sets/tasks/TaskTypes.py) including text classification and generation. So, we need to populate the `tasks` variable to `[TaskType.TEXT_CLASSIFICATION, TaskType.TEXT_TO_TEXT_GENERATION]`. That's it!
```
class MySimpleTransformation(SentenceOperation):
    """Toy transformation that upper-cases the whole sentence."""
    # Tasks this transformation applies to, and the language it supports
    tasks = [TaskType.TEXT_CLASSIFICATION, TaskType.TEXT_TO_TEXT_GENERATION]
    languages = ["en"]

    def generate(self, sentence):
        # The interface expects a list of perturbed sentences
        return [sentence.upper()]

my_transformation = MySimpleTransformation()
my_transformation.generate("John was n't the person I had n't imagined.")
```
Obviously this can barely be called a transformation. What could this really achieve? Duh.
So, let's quickly compare the performance of a trained text classifier on a common test set, and a test set with MySimpleTransformation applied (also called a perturbed set) with this one line of code. And you need to hold your breath for around 5 minutes!
```
execute_model(MySimpleTransformation, "TEXT_CLASSIFICATION", percentage_of_examples=1)
```
### 🕺 Voila! The accuracy on the perturbed set has fallen by 6% with this simple transformation!
So what happened internally? --> `execute_model` depending on the transformation type [SentenceOperation](https://github.com/GEM-benchmark/NL-Augmenter/blob/main/interfaces/SentenceOperation.py)) and the task you provided (TEXT_CLASSIFICATION) evaluated a pre-trained model of HuggingFace. In this case, a sentiment analysis model [aychang/roberta-base-imdb](https://huggingface.co/aychang/roberta-base-imdb) was chosen and evaluated on 1% of the [IMDB dataset](https://huggingface.co/datasets/imdb) with and without the transformation to check if the sentiment is predicted correctly.
If you want to evaluate this on your own model and dataset, you can pass the parameters as shown below in the `execute_model` method. Note that we obviously can't support each and every model type and dataset type and hence some models and datasets might require refactoring in the `evaluation_engine` class from your side and we are happy to help. 😊
```
# Here are the different parameters which are used as defaults!
# execute_model(MySimpleTransformation, "TEXT_CLASSIFICATION", "en", model_name = "aychang/roberta-base-imdb", dataset="imdb", percentage_of_examples=1)
```
## A Model Based Transformation
We don't want to restrict ourselves with just string level changes! We want to do more, don't we? So, let's use a pre-trained paraphrase generator to transform question answering examples. There is an existing interface [QuestionAnswerOperation](https://github.com/GEM-benchmark/NL-Augmenter/blob/main/interfaces/QuestionAnswerOperation.py) which takes the context, the question and the answer as inputs. Let's use that to augment our training data for question answering!
```
import torch
from transformers import T5ForConditionalGeneration, AutoTokenizer
class MySecondTransformation(QuestionAnswerOperation):
    """Paraphrase the question of a QA example with a pre-trained T5 model."""
    tasks = [TaskType.QUESTION_ANSWERING, TaskType.QUESTION_GENERATION]
    languages = ["en"]

    def __init__(self, max_outputs=5):
        super().__init__()
        # Parrot: a T5 model fine-tuned for paraphrase generation
        model_name="prithivida/parrot_paraphraser_on_T5"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = T5ForConditionalGeneration.from_pretrained(model_name)
        self.max_outputs = max_outputs  # number of paraphrases to return

    def generate(self, context, question, answers): # Note that the choice of inputs for 'generate' is consistent with those in QuestionAnswerOperation
        # Let's call the HF model to generate a paraphrase for the question
        paraphrase_input = question
        batch = self.tokenizer([paraphrase_input],truncation=True,padding='longest',max_length=60, return_tensors="pt")
        # Beam search (10 beams) returning max_outputs sequences
        translated = self.model.generate(**batch,max_length=60,num_beams=10, num_return_sequences=self.max_outputs, temperature=1.5)
        paraphrased_questions = self.tokenizer.batch_decode(translated, skip_special_tokens=True)
        # context = "Apply your own logic here"
        # answers = "And here too :)"
        # return the list of new question-answering examples
        return [(context, paraphrase, answers) for paraphrase in paraphrased_questions]

t4 = MySecondTransformation()
t4.generate(context="Mumbai, Bengaluru, New Delhi are among the many famous places in India.",
            question="What are the famous places we should not miss in India?",
            answers=["Mumbai", "Bengaluru", "Delhi", "New Delhi"])
```
Voila! Seems like you have created a new training example now for question-answering and question-generation! 🎉 🎊 🎉
#Now you are all ready to contribute a transformation to [NL-Augmenter 🦎 → 🐍](https://github.com/GEM-benchmark/NL-Augmenter)!
## What is this deal with filters?
So, just the way transformations can transform examples of text, filters can identify whether an example follows some pattern of text! The only difference is that while transformations return another example of the same input format, filters return True or False!
sentence --> SentenceOperation.**generate**(sentence) --> List of perturbed sentence
sentence --> SentenceOperation.**filter**(sentence) --> TRUE/FALSE
#So, let's play with some existing filters!
```
from filters.keywords import TextContainsKeywordsFilter
from filters.length import TextLengthFilter, SentenceAndTargetLengthFilter
```
The `TextLengthFilter` accepts an input sentence if the length of the input sentence is within the initialised range. Let's initialise this filter to accept all sentences with length greater than 10 tokens!
```
f1 = TextLengthFilter(">", 10)
f1.filter("This sentence is long enough to pass while you think of implementing your own filter!")
f1.filter("This one's too short!")
```
Let's say you have a lot of paraphrasing data and you intend to train a paraphrase generator to convert longer sentences to shorter ones! Check how the `SentenceAndTargetLengthFilter` can be used for this!
```
f2 = SentenceAndTargetLengthFilter([">", "<"], [10,8])
f2.filter("That show is going to take place in front of immensely massive crowds.",
"Large crowds would attend the show.")
f2.filter("The film was nominated for the Academy Award for Best Art Direction.",
"The movie was a nominee for the Academy Award for Best Art Direction.")
```
Okay, now that you've said to yourself that these filters are too basic, let's try to make a simple and interesting one!
Let's define a filter which selects question-answer pairs which share a low lexical overlap between the question and the context!
```
import spacy
class LowLexicalOverlapFilter(QuestionAnswerOperation):
    """Keep only QA examples whose question shares few surface tokens with
    its context — i.e. the "hard", low-lexical-overlap examples."""
    tasks = [TaskType.QUESTION_ANSWERING, TaskType.QUESTION_GENERATION]
    languages = ["en"]

    def __init__(self, threshold=3):
        super().__init__()
        # Tokenization only; parser/tagger/NER are disabled per call below
        self.nlp = spacy.load("en_core_web_sm")
        # Maximum number of shared tokens for an example to count as low-overlap
        self.threshold = threshold

    def filter(self, context, question, answers):
        # Note that the only difference between a filter and a transformation is this method!
        # The inputs remain the same!
        question_tokenized = self.nlp(question, disable=["parser", "tagger", "ner"])
        context_tokenized = self.nlp(context, disable=["parser", "tagger", "ner"])
        q_tokens = set([t.text for t in question_tokenized])
        c_tokens = set([t.text for t in context_tokenized])
        # BUG FIX: the original compared with '>' and therefore returned True
        # for HIGH overlap — the opposite of the class name and of the stated
        # goal of selecting pairs that share a *low* lexical overlap.
        low_lexical_overlap = len(q_tokens.intersection(c_tokens)) <= self.threshold
        return low_lexical_overlap
f3 = LowLexicalOverlapFilter()
f3.filter("New York, is the most populous city in the United States.",
"Which is the most populous city of the United States?",
["New York"])
f3.filter("New York, is the most populous city in the United States.",
"Which city has the largest population in the US?",
["New York"])
```
That's it! So you have created a new filter which can separate the hard examples from the easy ones! 🎉 🎊 🎉
#Now go ahead and contribute a nice filter to [NL-Augmenter 🦎 → 🐍](https://github.com/GEM-benchmark/NL-Augmenter)!
| github_jupyter |
# Machine Learning Engineer Nanodegree
## Introduction and Foundations
## Project 0: Titanic Survival Exploration
In 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions.
> **Tip:** Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook.
# Getting Started
To begin working with the RMS Titanic passenger data, we'll first need to `import` the functionality we need, and load our data into a `pandas` DataFrame.
Run the code cell below to load our data and display the first few entries (passengers) for examination using the `.head()` function.
> **Tip:** You can run a code cell by clicking on the cell and using the keyboard shortcut **Shift + Enter** or **Shift + Return**. Alternatively, a code cell can be executed using the **Play** button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. [Markdown](http://daringfireball.net/projects/markdown/syntax) allows you to write easy-to-read plain text that can be converted to HTML.
```
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
```
From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
- **Survived**: Outcome of survival (0 = No; 1 = Yes)
- **Pclass**: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- **Name**: Name of passenger
- **Sex**: Sex of the passenger
- **Age**: Age of the passenger (Some entries contain `NaN`)
- **SibSp**: Number of siblings and spouses of the passenger aboard
- **Parch**: Number of parents and children of the passenger aboard
- **Ticket**: Ticket number of the passenger
- **Fare**: Fare paid by the passenger
- **Cabin** Cabin number of the passenger (Some entries contain `NaN`)
- **Embarked**: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the **Survived** feature from this dataset and store it as its own separate variable `outcomes`. We will use these outcomes as our prediction targets.
Run the code cell below to remove **Survived** as a feature of the dataset and store it in `outcomes`.
```
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
```
The very same sample of the RMS Titanic data now shows the **Survived** feature removed from the DataFrame. Note that `data` (the passenger data) and `outcomes` (the outcomes of survival) are now *paired*. That means for any passenger `data.loc[i]`, they have the survival outcome `outcome[i]`.
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how *accurate* our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our `accuracy_score` function and test a prediction on the first five passengers.
**Think:** *Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be?*
```
def accuracy_score(truth, pred):
    """ Returns accuracy score for input truth and predictions. """
    # Predictions must line up one-to-one with the true outcomes
    if len(truth) != len(pred):
        return "Number of predictions does not match number of outcomes!"
    # Fraction of matching entries, reported as a percentage
    accuracy = (truth == pred).mean() * 100
    return "Predictions have an accuracy of {:.2f}%.".format(accuracy)
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
```
> **Tip:** If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off.
# Making Predictions
If we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking.
The `predictions_0` function below will always predict that a passenger did not survive.
```
def predictions_0(data):
    """ Model with no features. Always predicts a passenger did not survive. """
    # One '0' (did not survive) per row of the passenger data
    return pd.Series([0 for _, _row in data.iterrows()])
# Make the predictions
predictions = predictions_0(data)
```
### Question 1
*Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?*
**Hint:** Run the code cell below to see the accuracy of this prediction.
```
print accuracy_score(outcomes, predictions)
```
**Answer:** For a prediction that none of the passengers survived, it appears that the accuracy_score(as measured by the proportion of passengers where our prediction of their survival is correct)is 61.62%
***
Let's take a look at whether the feature **Sex** has any indication of survival rates among passengers using the `survival_stats` function. This function is defined in the `titanic_visualizations.py` Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across.
Run the code cell below to plot the survival outcomes of passengers based on their sex.
```
survival_stats(data, outcomes, 'Sex')
```
Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females *did* survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive.
Fill in the missing code below so that the function will make this prediction.
**Hint:** You can access the values of each feature for a passenger like a dictionary. For example, `passenger['Sex']` is the sex of the passenger.
```
def predictions_1(data):
    """ Model with one feature:
        - Predict a passenger survived if they are female. """
    # Males are predicted not to survive (0); everyone else survives (1).
    outcomes = [0 if passenger['Sex'] == "male" else 1
                for _, passenger in data.iterrows()]
    return pd.Series(outcomes)
# Make the predictions
predictions = predictions_1(data)
```
### Question 2
*How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?*
**Hint:** Run the code cell below to see the accuracy of this prediction.
```
print accuracy_score(outcomes, predictions)
```
**Answer**: As we can see in the visualization most of the male passengers in Titanic didn't survive while many of the female passengers survived, so it seems reasonable that accuracy score has improved to 78.68% after predicting male passengers will not survive while female passengers will survive, compared to the score of 61.62% where the prediction was no one will survive.
***
Using just the **Sex** feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the **Age** of each male, by again using the `survival_stats` function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the **Sex** 'male' will be included.
Run the code cell below to plot the survival outcomes of male passengers based on their age.
```
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
```
Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older *did not survive* the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive.
Fill in the missing code below so that the function will make this prediction.
**Hint:** You can start your implementation of this function using the prediction code you wrote earlier from `predictions_1`.
```
def predictions_2(data):
    """ Model with two features:
        - Predict a passenger survived if they are female.
        - Predict a passenger survived if they are male and younger than 10. """
    results = []
    for _, row in data.iterrows():
        is_female = row['Sex'] == "female"
        # NaN ages compare False against 10, so unknown-age males get 0.
        is_young_male = row['Sex'] == "male" and row["Age"] < 10
        results.append(1 if (is_female or is_young_male) else 0)
    return pd.Series(results)
# Make the predictions
predictions = predictions_2(data)
```
### Question 3
*How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?*
**Hint:** Run the code cell below to see the accuracy of this prediction.
```
print accuracy_score(outcomes, predictions)
```
**Answer**: After the modification of predicting all males under the age of 10 also survives, the score improved to 79.35%.
***
Adding the feature **Age** as a condition in conjunction with **Sex** improves the accuracy by a small margin more than with simply using the feature **Sex** alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions.
**Pclass**, **Sex**, **Age**, **SibSp**, and **Parch** are some suggested features to try.
Use the `survival_stats` function below to examine various survival statistics.
**Hint:** To use multiple filter conditions, put each condition in the list passed as the last argument. Example: `["Sex == 'male'", "Age < 18"]`
```
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age < 18"])
```
After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model.
**Hint:** You can start your implementation of this function using the prediction code you wrote earlier from `predictions_2`.
```
survival_stats(data,outcomes,'Pclass', ["Sex == 'female'"])
survival_stats(data,outcomes,'Embarked', ["Sex == 'female'","Pclass == 1"])
survival_stats(data,outcomes,'Embarked', ["Sex == 'female'","Pclass == 2"])
survival_stats(data, outcomes, 'Embarked', ["Sex == 'female'", 'Pclass == 3'])
def predictions_3(data):
    """ Model with multiple features. Makes a prediction with an accuracy of at least 80%. """
    def _survives(row):
        # Females: classes 1 and 2 always survive; class 3 survives only
        # when the passenger embarked at 'C' or 'Q'.
        if row['Sex'] == "female":
            if row["Pclass"] in (1, 2):
                return 1
            if row["Pclass"] == 3 and row["Embarked"] in ("C", "Q"):
                return 1
            return 0
        # Non-females: only boys under 10 are predicted to survive.
        if row["Sex"] == "male" and row["Age"] < 10:
            return 1
        return 0
    return pd.Series([_survives(passenger) for _, passenger in data.iterrows()])
# Make the predictions
predictions = predictions_3(data)
# Print the accuracy score
print accuracy_score(outcomes, predictions)
```
### Question 4
*Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions?*
**Hint:** Run the code cell below to see the accuracy of your predictions.
```
print accuracy_score(outcomes, predictions)
```
## **Answer**:
### Methodology :
* Given 'Sex' is the most significant variable in predicting survival outcome accurately first we split the data on 'Sex' to check if the passenger is male or female. If the passenger is male, given almost all the male passengers died, we predict only the male passengers under the age of 10 will survive.
* For the female passengers, we essentially want to know which female passengers 'didn't survive' given most female passengers did survive. It seemed that PClass is a very significant variable as almost all the female passengers in the passenger class 1 and 2 survived while about half female members of the passenger3 class didn't survive. Thus we predict all female passengers of class 1 and 2 will survive.
* After splitting on the 'female' and 'Pclass' variables, where we predict all female passengers of passenger classes 1 and 2 will survive, the variable "Embarked" was used to see which female passengers of the third class survive. Among the female passengers of class 3, the majority of those who embarked at port "S" didn't survive, while the passengers from the other two ports, "C" and "Q", did survive. So we predicted that the female passengers of class 3 who embarked from ports "Q" and "C" will survive and all other female passengers of class 3 will not. This raised the accuracy score to 81.82%.
# Conclusion
After several iterations of exploring and conditioning on the data, you have built a useful algorithm for predicting the survival of each passenger aboard the RMS Titanic. The technique applied in this project is a manual implementation of a simple machine learning model, the *decision tree*. A decision tree splits a set of data into smaller and smaller groups (called *nodes*), by one feature at a time. Each time a subset of the data is split, our predictions become more accurate if each of the resulting subgroups are more homogeneous (contain similar labels) than before. The advantage of having a computer do things for us is that it will be more exhaustive and more precise than our manual exploration above. [This link](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) provides another introduction into machine learning using a decision tree.
A decision tree is just one of many models that come from *supervised learning*. In supervised learning, we attempt to use features of the data to predict or model things with objective outcome labels. That is to say, each of our data points has a known outcome value, such as a categorical, discrete label like `'Survived'`, or a numerical, continuous value like predicting the price of a house.
### Question 5
*Think of a real-world scenario where supervised learning could be applied. What would be the outcome variable that you are trying to predict? Name two features about the data used in this scenario that might be helpful for making the predictions.*
**Answer**:
Supervised learning could be applied to determine whether certain new users of Quora/Reddit or similar content aggregation sites are going to be trolls/spammers or not (trolls = users who create unwanted behavior in the community by violating rules). We can use features like the number of reports against a user's edits, the number of downvotes they receive, and whether they are reported for fake names/spam to predict trolls. The labels will be spammer/non-spammer or troll/regular user.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| github_jupyter |
# iOS人脸识别库(静态库)
## 创建静态库

## dlib库源代码添加到工程
* 把dlib拷贝到新建工程目录下
## 编译dlib库
* 使用Xcode构建dlib工程
```bash
cd dlib/dlib
mkdir build
cd build
cmake -G Xcode ..
```
* 打开工程文件dlib.xcodeproj进行配置
```
TARGETS -> dlib -> Build Settings -> Architectures -> Architectures = arm64 arm64e armv7 armv7s
# Architectures默认值是$(ARCHS_STANDARD),这个值会随着iOS Deployment Target的设置而变化。如果不需要支持32位CPU的话就不用设置这个。
TARGETS -> dlib -> Build Settings -> Architectures -> Base SDK = iOS
TARGETS -> dlib -> Build Settings -> Architectures -> Build Active Architecture Only = No
TARGETS -> dlib -> Build Settings -> Architectures -> Supported Platforms = iOS
TARGETS -> dlib -> Build Settings -> Architectures -> Valid Architectures = arm64 arm64e armv7 armv7s
TARGETS -> dlib -> Build Settings -> Deployment -> iOS Deployment Target = iOS 10.0
# 要支持 armv7,需要使用 iOS 10.0 及以下版本
设置活动的Scheme为dlib,Edit Scheme -> Run -> Build Configuration = Release
```
* [No architectures to compile for (ONLY_ACTIVE_ARCH=YES, active arch=x86_64, VALID_ARCHS=armv7 armv7s)](https://stackoverflow.com/questions/12889065/no-architectures-to-compile-for-only-active-arch-yes-active-arch-x86-64-valid)
* 编译(Product -> Build)
```
通过设置活动的Scheme,编译设备和模拟器对应的静态库。
```
* 合并为一个静态库
```bash
lipo -create Debug-iphoneos/libdlib-ios.a Debug-iphonesimulator/libdlib-ios.a -output libdlib.a
```
## 拷贝libdlib.a到工程目录下的libs目录
## 工程配置
```
TARGETS -> face-recognition -> Build Settings -> Build Active Architecture Only = No
TARGETS -> face-recognition -> Build Settings -> Header Search Paths = dlib/
TARGETS -> face-recognition -> Build Settings -> Library Search Paths = libs/
TARGETS -> face-recognition -> Build Phases -> Link Binary With Libraries 添加 libdlib.a
TARGETS -> face-recognition -> Build Phases -> Copy Files -> Add -> face_recognition.hpp
TARGETS -> Add -> Aggregate -> distribution
TARGETS -> distribution -> Build Phases -> Add -> New Run Script Phase
TARGETS -> distribution -> Build Phases -> Run Script -> Shell 添加 ./build_distribution.sh
设置活动的Scheme为distribution,Edit Scheme -> Run -> Build Configuration = Release
```
## 创建用于自动发布的脚本
* 在工程目录下新建脚本文件:build_distribution.sh
```bash
#!/bin/bash
# Build the static library for device and simulator, merge the two with
# lipo into a universal binary, copy the public headers, and zip the SDK.
# NOTE: this script uses bash features ([[ ]], pushd/popd), so the shebang
# must be bash, not plain sh.
PROJ=${PROJECT_NAME}.xcodeproj
LIB_STATIC_NAME=${PROJECT_NAME} # reuse by changing to your own project name
DISTRIBUTION_DIR=./distribution # output directory for the packaged files
IPHONE_OS_DIR=${DISTRIBUTION_DIR}/${CONFIGURATION}-iphoneos
IPHONE_SIMULATOR_DIR=${DISTRIBUTION_DIR}/${CONFIGURATION}-iphonesimulator
# Create the directory for the device (arm) library
if [[ ! -d ${IPHONE_OS_DIR} ]]; then
    mkdir -p ${IPHONE_OS_DIR}
fi
# Create the directory for the simulator library
if [[ ! -d ${IPHONE_SIMULATOR_DIR} ]]; then
    mkdir -p ${IPHONE_SIMULATOR_DIR}
fi
# error: accessing build database ".../XCBuildData/build.db": disk I/O error
# -UseModernBuildSystem=NO works around the Xcode 10 issue above
# (https://stackoverflow.com/questions/51153525/xcode-10-unable-to-attach-db-error)
# Build the device library
xcodebuild -project ${PROJ} \
    -scheme ${LIB_STATIC_NAME} \
    -configuration ${CONFIGURATION} \
    -sdk iphoneos \
    -UseModernBuildSystem=NO \
    clean \
    build \
    CONFIGURATION_BUILD_DIR=${IPHONE_OS_DIR}
# -archivePath ${IPHONE_OS_DIR}
# Build the simulator library
xcodebuild build -project ${PROJ} \
    -scheme ${LIB_STATIC_NAME} \
    -configuration ${CONFIGURATION} \
    -sdk iphonesimulator \
    -UseModernBuildSystem=NO \
    clean \
    build \
    CONFIGURATION_BUILD_DIR=${IPHONE_SIMULATOR_DIR}
# -archivePath ${IPHONE_SIMULATOR_DIR}
## Create the SDK directory (recreated from scratch on each run)
SDK_DIR=${DISTRIBUTION_DIR}/${LIB_STATIC_NAME}
if [[ -d ${SDK_DIR} ]]; then
    rm -fR ${SDK_DIR}
fi
mkdir -p ${SDK_DIR}
# Static library file name
LIB_NAME=lib${LIB_STATIC_NAME}.a
# Merge the simulator and device binaries into one universal library
lipo -create ${IPHONE_OS_DIR}/${LIB_NAME} ${IPHONE_SIMULATOR_DIR}/${LIB_NAME} -output ${SDK_DIR}/${LIB_NAME}
# Copy the public headers
cp -R ${IPHONE_OS_DIR}/include/${LIB_STATIC_NAME}/* ${SDK_DIR}/
# Zip the SDK, stamping the file name with the build date and configuration
PACKAGE_DATE=`date '+%Y%m%d%H'`
pushd ${DISTRIBUTION_DIR}
SDK_ZIP_NAME=iOS_${LIB_STATIC_NAME}_${PACKAGE_DATE}_${CONFIGURATION}.zip
zip -qr ${SDK_ZIP_NAME} ${LIB_STATIC_NAME}
# BUGFIX: popd takes no directory argument; the original
# `popd ${DISTRIBUTION_DIR}` is a usage error in bash.
popd
```
## 给脚本添加执行权限
```bash
$ sudo chmod +x build_distribution.sh
```
## 编译工程
## 参考资料
* [Cross-compile to static lib (libgcrypt) for use on iOS](https://stackoverflow.com/questions/26812060/cross-compile-to-static-lib-libgcrypt-for-use-on-ios)
* [iOS 制作静态库自动编译脚本](https://www.jianshu.com/p/c796fcba1604)
* [静态库自动打包合并静态库脚本](https://blog.csdn.net/zxw_xzr/article/details/79217516)
* [iOS编译OpenSSL静态库(使用脚本自动编译)](https://www.jianshu.com/p/651513cab181)
* [iOS 静态库打包流程简化](https://blog.csdn.net/zhouzhoujianquan/article/details/53192597)
* [linux 下shell中if的“-e,-d,-f”是什么意思](https://www.cnblogs.com/senior-engineer/p/6206329.html)
* [Linux命令-cp 把整个目录下文件复制到另一个目录](https://blog.csdn.net/sdtvyyb_007/article/details/53423182)
* [Compile Collada DOM (c++) for use in iOS](https://stackoverflow.com/questions/30810809/compile-collada-dom-c-for-use-in-ios)
* [How to set up CMake to build a library for the iPhone](https://stackoverflow.com/questions/794137/how-to-set-up-cmake-to-build-a-library-for-the-iphone)
* [ios-cmake](https://github.com/sheldonth/ios-cmake/blob/master/ios.cmake)
* [How to set up CMake to build an app for the iPhone](https://stackoverflow.com/questions/822404/how-to-set-up-cmake-to-build-an-app-for-the-iphone)
* [How can I make Cmake use specific compiler and flags when final compilation stage instead of detection?](https://stackoverflow.com/questions/6476203/how-can-i-make-cmake-use-specific-compiler-and-flags-when-final-compilation-stag)
* [Xcode7 制作Framework](https://www.cnblogs.com/developer-qin/p/5729250.html)
* [iOS framework](http://www.cnblogs.com/developer-qin/p/5691008.html)
* [Xcode 6制作动态及静态Framework](http://www.cocoachina.com/ios/20141126/10322.html)
* [Xcode 10: unable to attach DB error](https://stackoverflow.com/questions/51153525/xcode-10-unable-to-attach-db-error)
* [Xcode 10 beta error while building my project?](https://stackoverflow.com/questions/51562894/xcode-10-beta-error-while-building-my-project)
* [用lipo合并模拟器Framework与真机Framework](https://blog.csdn.net/gavin8803/article/details/52103671)
* [iOS中的静态库与动态库,区别、制作和使用](http://www.cocoachina.com/ios/20161109/18031.html)
* [Xcode7 制作通用的framework,静态库和动态库](https://www.jianshu.com/p/2b04935d6943)
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TensorFlow Probability Case Study: Covariance Estimation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_Case_Study_Covariance_Estimation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_Case_Study_Covariance_Estimation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
I wrote this notebook as a case study to learn TensorFlow Probability. The problem I chose to solve is estimating a covariance matrix for samples of a 2-D mean 0 Gaussian random variable. The problem has a couple of nice features:
* If we use an inverse Wishart prior for the covariance (a common approach), the problem has an analytic solution, so we can check our results.
* The problem involves sampling a constrained parameter, which adds some interesting complexity.
* The most straightforward solution is not the fastest one, so there is some optimization work to do.
I decided to write my experiences up as I went along. It took me awhile to wrap my head around the finer points of TFP, so this notebook starts fairly simply and then gradually works up to more complicated TFP features. I ran into lots of problems along the way, and I've tried to capture both the processes that helped me identify them and the workarounds I eventually found. I've tried to include *lots* of detail (including lots of tests to make sure individual steps are correct).
## Why learn TensorFlow Probability?
I found TensorFlow Probability appealing for my project for a few reasons:
* TensorFlow Probability lets you prototype and develop complex models interactively in a notebook. You can break your code up into small pieces that you can test interactively and with unit tests.
* Once you're ready to scale up, you can take advantage of all of the infrastructure we have in place for making TensorFlow run on multiple, optimized processors on multiple machines.
* Finally, while I really like Stan, I find it quite difficult to debug. You have to write all your modeling code in a standalone language that has very few tools for letting you poke at your code, inspect intermediate states, and so on.
The downside is that TensorFlow Probability is much newer than Stan and PyMC3, so the documentation is a work in progress, and there's lots of functionality that's yet to be built. Happily, I found TFP's foundation to be solid, and it's designed in a modular way that allows one to extend its functionality fairly straightforwardly. In this notebook, in addition to solving the case study, I'll show some ways to go about extending TFP.
## Who this is for
I'm assuming that readers are coming to this notebook with some important prerequisites. You should:
* Know the basics of Bayesian inference. (If you don't, a really nice first book is *[Statistical Rethinking](http://xcelab.net/rm/statistical-rethinking/)*)
* Have some familiarity with an MCMC sampling library, e.g. [Stan](http://mc-stan.org/) / [PyMC3](http://docs.pymc.io/) / [BUGS](https://www.mrc-bsu.cam.ac.uk/software/bugs/)
* Have a solid grasp of [NumPy](http://www.numpy.org/) (One good intro is *[Python for Data Analysis](http://shop.oreilly.com/product/0636920023784.do)*)
* Have at least passing familiarity with [TensorFlow](https://www.tensorflow.org/), but not necessarily expertise. (*[Learning TensorFlow](http://shop.oreilly.com/product/0636920063698.do)* is good, but TensorFlow's rapid evolution means that most books will be a bit dated. Stanford's [CS20](https://web.stanford.edu/class/cs20si/) course is also good.)
# First attempt
Here's my first attempt at the problem. Spoiler: my solution doesn't work, and it's going to take several attempts to get things right! Although the process takes awhile, each attempt below has been useful for learning a new part of TFP.
One note: TFP doesn't currently implement the inverse Wishart distribution (we'll see at the end how to roll our own inverse Wishart), so instead I'll change the problem to that of estimating a precision matrix using a Wishart prior.
```
import collections
import math
import os
import time
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
```
## Step 1: get the observations together
My data here are all synthetic, so this is going to seem a bit tidier than a real-world example. However, there's no reason you can't generate some synthetic data of your own.
**Tip**: Once you've decided on the form of your model, you can pick some parameter values and use your chosen model to generate some synthetic data. As a sanity check of your implementation, you can then verify that your estimates include the true values of the parameters you chose. To make your debugging / testing cycle faster, you might consider a simplified version of your model (e.g. use fewer dimensions or fewer samples).
**Tip:** It's easiest to work with your observations as NumPy arrays. One important thing to note is that NumPy by default uses float64's, while TensorFlow by default uses float32's.
In general, TensorFlow operations want all arguments to have the same type, and you have to do explicit data casting to change types. If you use float64 observations, you'll need to add in a lot of cast operations. NumPy, in contrast, will take care of casting automatically. Hence, **it is *much* easier to convert your Numpy data into float32 than it is to force TensorFlow to use float64.**
### Choose some parameter values
```
# We're assuming 2-D data with a known true mean of (0, 0)
true_mean = np.zeros([2], dtype=np.float32)
# We'll make the 2 coordinates correlated
true_cor = np.array([[1.0, 0.9], [0.9, 1.0]], dtype=np.float32)
# And we'll give the 2 coordinates different variances
true_var = np.array([4.0, 1.0], dtype=np.float32)
# Combine the variances and correlations into a covariance matrix:
# cov[i, j] = sqrt(var[i]) * sqrt(var[j]) * cor[i, j]
true_cov = np.expand_dims(np.sqrt(true_var), axis=1).dot(
    np.expand_dims(np.sqrt(true_var), axis=1).T) * true_cor
# We'll be working with precision matrices, so we'll go ahead and compute the
# true precision matrix here
true_precision = np.linalg.inv(true_cov)
# Here's our resulting covariance matrix
# (Python 2 print statements -- this notebook targets Python 2.)
print true_cov
# Verify that it's positive definite, since np.random.multivariate_normal
# complains about it not being positive definite for some reason.
# (Note that I'll be including a lot of sanity checking code in this notebook -
# it's a *huge* help for debugging)
print 'eigenvalues: ', np.linalg.eigvals(true_cov)
```
### Generate some synthetic observations
Note that **TensorFlow Probability uses the convention that the initial dimension(s) of your data represent sample indices, and the final dimension(s) of your data represent the dimensionality of your samples.**
Here we want 100 samples, each of which is a vector of length 2. We'll generate an array `my_data` with shape (100, 2). `my_data[i, :]` is the $i$th sample, and it is a vector of length 2.
(Remember to make `my_data` have type float32!)
```
# Set the seed so the results are reproducible.
np.random.seed(123)
# Now generate some observations of our random variable.
# (Note that I'm suppressing a bunch of spurious warnings about the covariance
# matrix not being positive semidefinite via check_valid='ignore' because it
# really is positive definite!)
# Cast to float32 up front: TensorFlow defaults to float32, and converting
# the NumPy data once here avoids explicit casts later.
my_data = np.random.multivariate_normal(
    mean=true_mean, cov=true_cov, size=100,
    check_valid='ignore').astype(np.float32)
# Shape is (100, 2): sample index first, sample dimension last.
my_data.shape
```
### Sanity check the observations
One potential source of bugs is messing up your synthetic data! Let's do some simple checks.
```
# Do a scatter plot of the observations to make sure they look like what we
# expect (higher variance on the x-axis, y values strongly correlated with x)
plt.scatter(my_data[:, 0], my_data[:, 1], alpha=0.75)
plt.show()
# Compare sample statistics against the true generating parameters.
# (Python 2 print statements -- this notebook targets Python 2.)
print 'mean of observations:', np.mean(my_data, axis=0)
print 'true mean:', true_mean
print 'covariance of observations:\n', np.cov(my_data, rowvar=False)
print 'true covariance:\n', true_cov
```
Ok, our samples look reasonable. Next step.
## Step 2: Implement the likelihood function in NumPy
The main thing we'll need to write to perform our MCMC sampling in TF Probability is a log likelihood function. In general it's a bit trickier to write TF than NumPy, so I find it helpful to do an initial implementation in NumPy. I'm going to split the likelihood function into 2 pieces, a data likelihood function that corresponds to $P(data | parameters)$ and a prior likelihood function that corresponds to $P(parameters)$.
Note that these NumPy functions don't have to be super optimized / vectorized since the goal is just to generate some values for testing. Correctness is the key consideration!
First we'll implement the data log likelihood piece. That's pretty straightforward. The one thing to remember is that we're going to be working with precision matrices, so we'll parameterize accordingly.
```
def log_lik_data_numpy(precision, data):
    """Total log likelihood of `data` under N(true_mean, inv(precision)).

    NOTE: relies on the notebook-global `true_mean`.
    """
    # np.linalg.inv is a really inefficient way to get the covariance matrix,
    # but remember we don't care about speed here
    covariance = np.linalg.inv(precision)
    dist = scipy.stats.multivariate_normal(true_mean, covariance)
    return np.sum(dist.logpdf(data))
# test case: compute the log likelihood of the data given the true parameters
log_lik_data_numpy(true_precision, my_data)
```
We're going to use a Wishart prior for the precision matrix since there's an analytical solution for the posterior (see [Wikipedia's handy table of conjugate priors](https://en.wikipedia.org/wiki/Conjugate_prior#Continuous_distributions)).
The [Wishart distribution](https://en.wikipedia.org/wiki/Wishart_distribution) has 2 parameters:
* the number of *degrees of freedom* (labeled $\nu$ in Wikipedia)
* a *scale matrix* (labeled $V$ in Wikipedia)
The mean for a Wishart distribution with parameters $\nu, V$ is $E[W] = \nu V$, and the variance is $\text{Var}(W_{ij}) = \nu(v_{ij}^2+v_{ii}v_{jj})$
Some useful intuition: You can generate a Wishart sample by generating $\nu$ independent draws $x_1 \ldots x_{\nu}$ from a multivariate normal random variable with mean 0 and covariance $V$ and then forming the sum $W = \sum_{i=1}^{\nu} x_i x_i^T$.
If you rescale Wishart samples by dividing them by $\nu$, you get the sample covariance matrix of the $x_i$. This sample covariance matrix should tend toward $V$ as $\nu$ increases. When $\nu$ is small, there is lots of variation in the sample covariance matrix, so small values of $\nu$ correspond to weaker priors and large values of $\nu$ correspond to stronger priors. Note that $\nu$ must be at least as large as the dimension of the space you're sampling or you'll generate singular matrices.
We'll use $\nu = 3$ so we have a weak prior, and we'll take $V = \frac{1}{\nu} I$ which will pull our covariance estimate toward the identity (recall that the mean is $\nu V$).
```
PRIOR_DF = 3
PRIOR_SCALE = np.eye(2, dtype=np.float32) / PRIOR_DF
def log_lik_prior_numpy(precision, df=None, scale=None):
  """Wishart log prior density of a precision matrix.

  Args:
    precision: (d, d) precision matrix to evaluate.
    df: optional Wishart degrees of freedom; defaults to the module-level
      PRIOR_DF, preserving the original behavior for existing callers.
    scale: optional (d, d) Wishart scale matrix; defaults to the
      module-level PRIOR_SCALE.

  Returns:
    Scalar log probability of `precision` under Wishart(df, scale).
  """
  if df is None:
    df = PRIOR_DF
  if scale is None:
    scale = PRIOR_SCALE
  rv = scipy.stats.wishart(df=df, scale=scale)
  return rv.logpdf(precision)
# test case: compute the prior for the true parameters
# (`true_precision` comes from the data-generation cell above)
log_lik_prior_numpy(true_precision)
```
The Wishart distribution is the conjugate prior for estimating the precision matrix of a multivariate normal with known mean $\mu$.
Suppose the prior Wishart parameters are $\nu, V$ and that we have $n$ observations of our multivariate normal, $x_1, \ldots, x_n$. The posterior parameters are $n + \nu, \left(V^{-1} + \sum_{i=1}^n (x_i-\mu)(x_i-\mu)^T \right)^{-1}$.
```
# Analytic conjugate update: with a Wishart(nu, V) prior and n observations of
# a zero-mean multivariate normal, the posterior is Wishart with
#   df    = nu + n
#   scale = (V^{-1} + sum_i x_i x_i^T)^{-1}
# NOTE(review): my_data.T.dot(my_data) does no centering, i.e. it assumes the
# known mean is zero -- consistent with the zero-mean model used throughout.
n = my_data.shape[0]
nu_prior = PRIOR_DF
v_prior = PRIOR_SCALE
nu_posterior = nu_prior + n
v_posterior = np.linalg.inv(np.linalg.inv(v_prior) + my_data.T.dot(my_data))
# The mean of a Wishart(nu, V) distribution is nu * V.
posterior_mean = nu_posterior * v_posterior
# Element-wise posterior standard deviations, from
#   Var(W_ij) = nu * (v_ij^2 + v_ii * v_jj)
v_post_diag = np.expand_dims(np.diag(v_posterior), axis=1)
posterior_sd = np.sqrt(nu_posterior *
                       (v_posterior ** 2.0 + v_post_diag.dot(v_post_diag.T)))
```
A quick plot of the posteriors and the true values. Note that the posteriors are close to the sample precisions but are shrunk a bit toward the identity. Note also that the true values are pretty far from the mode of the posterior - presumably this is because the prior isn't a very good match for our data. In a real problem we'd likely do better with something like a scaled inverse Wishart prior for the covariance (see, for example, Andrew Gelman's [commentary](http://andrewgelman.com/2012/08/22/the-scaled-inverse-wishart-prior-distribution-for-a-covariance-matrix-in-a-hierarchical-model/) on the subject), but then we wouldn't have a nice analytic posterior.
```
# Plot a Gaussian approximation to the marginal posterior of each precision
# entry, with vertical lines marking the true and sample precisions.
# (Uses `my_data`, `true_precision`, `posterior_mean`, `posterior_sd` from
# the cells above.)
sample_precision = np.linalg.inv(np.cov(my_data, rowvar=False, bias=False))
fig, axes = plt.subplots(2, 2)
fig.set_size_inches(10, 10)
for i in range(2):
  for j in range(2):
    ax = axes[i, j]
    loc = posterior_mean[i, j]
    scale = posterior_sd[i, j]
    # Plot the density over +/- 3 posterior standard deviations.
    xmin = loc - 3.0 * scale
    xmax = loc + 3.0 * scale
    x = np.linspace(xmin, xmax, 1000)
    y = scipy.stats.norm.pdf(x, loc=loc, scale=scale)
    ax.plot(x, y)
    ax.axvline(true_precision[i, j], color='red', label='True precision')
    ax.axvline(sample_precision[i, j], color='red', linestyle=':', label='Sample precision')
    ax.set_title('precision[%d, %d]' % (i, j))
plt.legend()
plt.show()
```
## Step 3: Implement the likelihood function in TensorFlow
Spoiler: Our first attempt isn't going to work; we'll talk about why below.
**Tip**: use TensorFlow eager mode when developing your likelihood functions. Eager mode makes TF behave more like NumPy - everything executes immediately, so you can debug interactively instead of having to use `Session.run()`. See the notes [here](https://www.tensorflow.org/programmers_guide/eager).
### Preliminary: Distribution classes
TFP has a collection of distribution classes that we'll use to generate our log probabilities. One thing to note is that these classes work with tensors of samples rather than just single samples - this allows for vectorization and related speedups.
A distribution can work with a tensor of samples in 2 different ways. It's simplest to illustrate these 2 ways with a concrete example involving a distribution with a single scalar parameter. I'll use the [Poisson](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/Poisson) distribution, which has a `rate` parameter.
* If we create a Poisson with a single value for the `rate` parameter, a call to its `sample()` method returns a single value. This value is called an **`event`**, and in this case the events are all scalars.
* If we create a Poisson with a tensor of values for the `rate` parameter, a call to its `sample()` method now returns multiple values, one for each value in the rate tensor. The object acts as a *collection* of independent Poissons, each with its own rate, and each of the values returned by a call to `sample()` corresponds to one of these Poissons. This collection of independent *but not identically distributed* events is called a **`batch`**.
* The `sample()` method takes a `sample_shape` parameter which defaults to an empty tuple. Passing a non-empty value for `sample_shape` results in sample returning multiple batches. This collection of batches is called a **`sample`**.
A distribution's `log_prob()` method consumes data in a manner that parallels how `sample()` generates it. `log_prob()` returns probabilities for samples, i.e. for multiple, independent batches of events.
* If we have our Poisson object that was created with a scalar `rate`, each batch is a scalar, and if we pass in a tensor of samples, we'll get out a tensor of the same size of log probabilities.
* If we have our Poisson object that was created with a tensor of shape `T` of `rate` values, each batch is a tensor of shape `T`. If we pass in a tensor of samples of shape D, T, we'll get out a tensor of log probabilities of shape D, T.
Below are some examples that illustrate these cases. See [this notebook](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) for a more detailed tutorial on events, batches, and shapes.
A few conventions I'll be using in this notebook:
1. In general I'll create **new graphs for individual cells**. That way cells are self-contained, and operations I create in one cell won't have side effects in other cells.
2. I'll **separate graph construction from execution**. Cells will have the form:
```
with tf.Graph().as_default() as g:
# construct my local graph
...
g.finalize() # make sure the graph doesn't change after construction
with tf.Session(graph=g) as sess:
# run my graph
```
3. I'll use **underscores at the end of variable names to indicate they contain the output of the TensorFlow operation without the underscore**. For example:
```
x = tf.add(y, z) # x is a TensorFlow operation
x_ = sess.run(x) # x_ is the output of the operation x
```
```
# Demonstrate event vs. batch semantics of a distribution's log_prob().
with tf.Graph().as_default() as g:
  # case 1: get log probabilities for a vector of iid draws from a single
  # normal distribution
  norm1 = tfd.Normal(loc=0., scale=1.)
  probs1 = norm1.log_prob(tf.constant([1., 0.5, 0.]))
  # case 2: get log probabilities for a vector of independent draws from
  # multiple normal distributions with different parameters. Note the vector
  # values for loc and scale in the Normal constructor.
  norm2 = tfd.Normal(loc=[0., 2., 4.], scale=[1., 1., 1.])
  probs2 = norm2.log_prob(tf.constant([1., 0.5, 0.]))
  g.finalize()
with tf.Session(graph=g) as sess:
  print 'iid draws from a single normal:', sess.run(probs1)
  print 'draws from a batch of normals:', sess.run(probs2)
### Data log likelihood
First we'll implement the data log likelihood function.
Note: distributions can validate their input, but they don't do so by default. We'll definitely want to turn on validation while we're debugging! Once everything is working, we can turn validation off if speed is really critical.
```
# Validate distribution arguments while debugging; turn off later for speed.
VALIDATE_ARGS = True
# Fail fast instead of silently producing NaN statistics.
ALLOW_NAN_STATS = False
```
One key difference from the NumPy case is that our TensorFlow likelihood function will need to handle vectors of precision matrices rather than just single matrices. Vectors of parameters will be used when we sample from multiple chains.
We'll create a distribution object that works with a batch of precision matrices (i.e. one matrix per chain).
When computing log probabilities of our data, we'll need our data to be replicated in the same manner as our parameters so that there is one copy per batch variable. The shape of our replicated data will need to be as follows:
`[sample shape, batch shape, event shape]`
In our case, the event shape is 2 (since we are working with 2-D Gaussians). The sample shape is 100, since we have 100 samples. The batch shape will just be the number of precision matrices we're working with. It's wasteful to replicate the data each time we call the likelihood function, so we'll replicate the data in advance and pass in the replicated version.
Note that this is an inefficient implementation: `MultivariateNormalFullCovariance` is expensive relative to some alternatives that we'll talk about in the optimization section at the end.
```
def log_lik_data(precisions, replicated_data):
  """Total data log likelihood, one entry per precision matrix in the batch.

  Args:
    precisions: a [b, 2, 2] tensor of precision matrices (one per chain).
    replicated_data: observations tiled to [num_samples, b, 2] so that each
      batch member sees its own copy of the data.

  Returns:
    A length-b tensor of summed data log likelihoods.
  """
  num_matrices = tf.shape(precisions)[0]
  # We're estimating precision matrices, so invert each one (via its Cholesky
  # factor) to obtain the covariance the MVN distribution expects. A later
  # optimization section shows how to avoid this inversion altogether.
  chol_factors = tf.cholesky(precisions)
  covariances = tf.cholesky_solve(
      chol_factors, tf.eye(2, batch_shape=[num_matrices]))
  mvn = tfd.MultivariateNormalFullCovariance(
      loc=tf.zeros([num_matrices, 2]),
      covariance_matrix=covariances,
      validate_args=VALIDATE_ARGS,
      allow_nan_stats=ALLOW_NAN_STATS)
  # Sum over the sample dimension (axis 0), leaving one total per batch member.
  return tf.reduce_sum(mvn.log_prob(replicated_data), axis=0)
# For our test, we'll use a tensor of 2 precision matrices.
# We'll need to replicate our data for the likelihood function.
# Remember, TFP wants the data to be structured so that the sample dimensions
# are first (100 here), then the batch dimensions (2 here because we have 2
# precision matrices), then the event dimensions (2 because we have 2-D
# Gaussian data). We'll need to add a middle dimension for the batch using
# expand_dims, and then we'll need to create 2 replicates in this new dimension
# using tile.
n = 2
replicated_data = np.tile(np.expand_dims(my_data, axis=1), reps=[1, 2, 1])
print replicated_data.shape  # [sample, batch, event]
```
**Tip:** One thing I've found to be extremely helpful is writing little sanity checks of my TensorFlow functions. It's really easy to mess up the vectorization in TF, so having the simpler NumPy functions around is a great way to verify the TF output. Think of these as little unit tests.
```
# check against the numpy implementation
with tf.Graph().as_default() as g:
  # Batch of two test matrices: the identity and the true precision.
  precisions = np.stack([np.eye(2, dtype=np.float32), true_precision])
  n = precisions.shape[0]
  lik_tf = log_lik_data(precisions, replicated_data=replicated_data)
  g.finalize()
with tf.Session(graph=g) as sess:
  lik_tf_ = sess.run(lik_tf)
  # Each batch member should match the scalar numpy computation.
  for i in range(n):
    print i
    print 'numpy:', log_lik_data_numpy(precisions[i], my_data)
    print 'tensorflow:', lik_tf_[i]
```
### Prior log likelihood
The prior is easier since we don't have to worry about data replication.
```
def log_lik_prior(precisions):
  """Wishart log prior, evaluated for every precision matrix in the batch.

  Args:
    precisions: a [b, 2, 2] tensor of precision matrices.

  Returns:
    A length-b tensor of prior log probabilities.
  """
  prior = tfd.Wishart(
      df=PRIOR_DF,
      scale=PRIOR_SCALE,
      validate_args=VALIDATE_ARGS,
      allow_nan_stats=ALLOW_NAN_STATS)
  # Wishart treats the trailing [2, 2] dims as the event, so this is batched.
  return prior.log_prob(precisions)
# check against the numpy implementation
with tf.Graph().as_default() as g:
  precisions = np.stack([np.eye(2, dtype=np.float32), true_precision])
  n = precisions.shape[0]
  lik_tf = log_lik_prior(precisions)
  g.finalize()
with tf.Session(graph=g) as sess:
  lik_tf_ = sess.run(lik_tf)
  # The TF prior should agree with log_lik_prior_numpy for each batch member.
  for i in range(n):
    print i
    print 'numpy:', log_lik_prior_numpy(precisions[i])
    print 'tensorflow:', lik_tf_[i]
```
### Build the joint log likelihood function
The data log likelihood function above depends on our observations, but the sampler won't have those. We can get rid of the dependency without using a global variable by using a [closure](https://en.wikipedia.org/wiki/Closure_(computer_programming)). Closures involve an outer function that builds an environment containing variables needed by an inner function.
```
def get_log_lik(data, n_chains=1):
  """Build a joint log-likelihood closure over `data`.

  The sampler only supplies the parameter (precision matrices), so the data
  is captured -- already replicated -- in the closure environment rather
  than passed on every call.

  Args:
    data: [num_samples, 2] array of observations.
    n_chains: number of MCMC chains, i.e. the batch dimension.

  Returns:
    A function mapping a batch of precision matrices to joint
    (data + prior) log likelihoods.
  """
  # Replicate once up front so each likelihood call doesn't pay for tiling.
  tiled = np.tile(np.expand_dims(data, axis=1), reps=[1, n_chains, 1])
  def _log_lik(precision):
    return log_lik_data(precision, tiled) + log_lik_prior(precision)
  return _log_lik
```
## Step 4: Sample
Ok, time to sample! To keep things simple, we'll just use 1 chain and we'll use the identity matrix as the starting point. We'll do things more carefully later.
Again, this isn't going to work - we'll get an exception.
```
# First sampling attempt: one chain starting at the identity. As explained
# below, this fails with a Cholesky error once the sampler proposes an
# asymmetric precision matrix.
with tf.Graph().as_default() as g:
  # Use expand_dims because we want to pass in a tensor of starting values
  init_precision = tf.expand_dims(tf.eye(2), axis=0)
  log_lik_fn = get_log_lik(my_data, n_chains=1)
  # we'll just do a few steps here
  num_results = 10
  num_burnin_steps = 10
  states, kernel_results = tfp.mcmc.sample_chain(
      num_results=num_results,
      num_burnin_steps=num_burnin_steps,
      current_state=[
          init_precision,
      ],
      kernel=tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=log_lik_fn,
          step_size=0.1,
          num_leapfrog_steps=3,
          seed=123),
      parallel_iterations=1)
  g.finalize()
with tf.Session(graph=g) as sess:
  tf.set_random_seed(123)
  try:
    states_, kernel_results_ = sess.run([states, kernel_results])
  except Exception, e:  # Python 2 syntax; catches the expected Cholesky failure
    # shorten the giant stack trace
    lines = str(e).split('\n')
    print '\n'.join(lines[:5]+['...']+lines[-3:])
```
### Identifying the problem
`InvalidArgumentError (see above for traceback): Cholesky decomposition was not successful. The input might not be valid.` That's not super helpful. Let's see if we can find out more about what happened.
* We'll print out the parameters for each step so we can see the value for which things fail
* We'll add some assertions to guard against specific problems.
Assertions are tricky because they're TensorFlow operations, and we have to take care that they get executed and don't get optimized out of the graph. It's worth reading [this overview](https://wookayin.github.io/tensorflow-talk-debugging/#1) of TensorFlow debugging if you aren't familiar with TF assertions. You can explicitly force assertions to execute using `tf.control_dependencies` (see the comments in the code below).
TensorFlow's native `Print` function has the same behavior as assertions - it's an operation, and you need to take some care to ensure that it executes. `Print` causes additional headaches when we're working in a notebook: its output is sent to `stderr`, and `stderr` isn't displayed in the cell. We'll use a trick here: instead of using `tf.Print`, we'll create our own TensorFlow print operation via `tf.py_func`. As with assertions, we have to make sure our method executes.
```
def get_log_lik_verbose(data, n_chains=1):
  """Like get_log_lik, but prints each proposed state and asserts symmetry.

  Used for debugging: the print op shows the parameter values the sampler
  proposes, and the assertion pinpoints asymmetric proposals before the
  Cholesky decomposition fails.
  """
  # The data argument that is passed in will be available to the inner function
  # below so it doesn't have to be passed in as a parameter.
  replicated_data = np.tile(np.expand_dims(data, axis=1), reps=[1, n_chains, 1])
  def _log_lik(precisions):
    # An internal method we'll make into a TensorFlow operation via tf.py_func
    def _print_precisions(precisions):
      print 'precisions:\n', precisions
      return False  # operations must return something!
    # Turn our method into a TensorFlow operation
    print_op = tf.py_func(_print_precisions, [precisions], tf.bool)
    # Assertions are also operations, and some care needs to be taken to ensure
    # that they're executed
    assert_op = tf.assert_equal(
        precisions, tf.transpose(precisions, perm=[0, 2, 1]), data=[precisions],
        message='not symmetrical', summarize=4, name='symmetry_check')
    # The control_dependencies statement forces its arguments to be executed
    # before subsequent operations
    with tf.control_dependencies([print_op, assert_op]):
      return (log_lik_data(precisions, replicated_data) +
              log_lik_prior(precisions))
  return _log_lik
# Re-run sampling with the verbose likelihood so we can see the parameter
# value that breaks the Cholesky decomposition.
with tf.Graph().as_default() as g:
  tf.set_random_seed(123)
  init_precision = tf.expand_dims(tf.eye(2), axis=0)
  log_lik_fn = get_log_lik_verbose(my_data)
  # we'll just do a few steps here
  num_results = 10
  num_burnin_steps = 10
  states, kernel_results = tfp.mcmc.sample_chain(
      num_results=num_results,
      num_burnin_steps=num_burnin_steps,
      current_state=[
          init_precision,
      ],
      kernel=tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=log_lik_fn,
          step_size=0.1,
          num_leapfrog_steps=3,
          seed=123),
      parallel_iterations=1)
  g.finalize()
with tf.Session(graph=g) as sess:
  try:
    states_, kernel_results_ = sess.run([states, kernel_results])
  except Exception, e:  # Python 2 syntax; the symmetry assertion fires here
    # shorten the giant stack trace
    lines = str(e).split('\n')
    print '\n'.join(lines[:5]+['...']+lines[-3:])
```
### Why this fails
The very first new parameter value the sampler tries is an asymmetrical matrix. That causes the Cholesky decomposition to fail, since it's only defined for symmetrical (and positive definite) matrices.
The problem here is that our parameter of interest is a precision matrix, and precision matrices must be real, symmetric, and positive definite. The sampler doesn't know anything about this constraint (except possibly through gradients), so it is entirely possible that the sampler will propose an invalid value, leading to an exception, particularly if the step size is large.
With the Hamiltonian Monte Carlo sampler, we may be able to work around the problem by using a very small step size, since the gradient should keep the parameters away from invalid regions, but small step sizes mean slow convergence. With a Metropolis-Hastings sampler, which doesn't know anything about gradients, we're doomed.
# Version 2: reparametrizing to unconstrained parameters
There is a straightforward solution to the problem above: we can reparametrize our model such that the new parameters no longer have these constraints. TFP provides a useful set of tools - bijectors - for doing just that.
### Reparameterization with bijectors
Our precision matrix must be real and symmetric; we want an alternative parameterization that doesn't have these constraints. A starting point is a Cholesky factorization of the precision matrix. The Cholesky factors are still constrained - they are lower triangular, and their diagonal elements must be positive. However, if we take the log of the diagonals of the Cholesky factor, the logs are no longer constrained to be positive, and then if we flatten the lower triangular portion into a 1-D vector, we no longer have the lower triangular constraint. The result in our case will be a length 3 vector with no constraints.
(The [Stan manual](http://mc-stan.org/users/documentation/) has a great chapter on using transformations to remove various types of constraints on parameters.)
This reparameterization has little effect on our data log likelihood function - we just have to invert our transformation so we get back the precision matrix - but the effect on the prior is more complicated. We've specified that the probability of a given precision matrix is given by the Wishart distribution; what is the probability of our transformed matrix?
Recall that if we apply a monotonic function $g$ to a 1-D random variable $X$, $Y = g(X)$, the density for $Y$ is given by
$$
f_Y(y) = | \frac{d}{dy}(g^{-1}(y)) | f_X(g^{-1}(y))
$$
The derivative of $g^{-1}$ term accounts for the way that $g$ changes local volumes. For higher dimensional random variables, the corrective factor is the absolute value of the determinant of the Jacobian of $g^{-1}$ (see [here](https://en.wikipedia.org/wiki/Probability_density_function#Dependent_variables_and_change_of_variables)).
We'll have to add a Jacobian of the inverse transform into our log prior likelihood function. Happily, TFP's `Bijector` class can take care of this for us.
The [`Bijector`](https://www.tensorflow.org/api_docs/python/tf/distributions/bijectors/Bijector) class is used to represent invertible, smooth functions used for changing variables in probability density functions. Bijectors all have a `forward()` method that performs a transform, an `inverse()` method that inverts it, and `forward_log_det_jacobian()` and `inverse_log_det_jacobian()` methods that provide the Jacobian corrections we need when we reparameterize a pdf.
TFP provides a collection of useful bijectors that we can combine through composition via the [`Chain`](https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/Chain) operator to form quite complicated transforms. In our case, we'll compose the following 3 bijectors (the operations in the chain are performed from right to left):
1. The first step of our transform is to perform a Cholesky factorization on the precision matrix. There isn't a Bijector class for that; however, the [`CholeskyOuterProduct`](https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/CholeskyOuterProduct) bijector takes the product of 2 Cholesky factors. We can use the inverse of that operation using the [`Invert`](https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/Invert) operator.
2. The next step is to take the log of the diagonal elements of the Cholesky factor. We accomplish this via the `TransformDiagonal` bijector and the inverse of the [`Exp`](https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/Exp) bijector.
3. Finally we flatten the lower triangular portion of the matrix to a vector using the inverse of the `FillTriangular` bijector.
```
# Our transform has 3 stages that we chain together via composition:
# Our transform has 3 stages that we chain together via composition:
# (Chain applies its bijectors right-to-left, so step 1 runs first.)
precision_to_unconstrained = tfb.Chain([
    # step 3: flatten the lower triangular portion of the matrix
    tfb.Invert(tfb.FillTriangular(validate_args=VALIDATE_ARGS)),
    # step 2: take the log of the diagonals
    tfb.TransformDiagonal(tfb.Invert(tfb.Exp(validate_args=VALIDATE_ARGS))),
    # step 1: decompose the precision matrix into its Cholesky factors
    tfb.Invert(tfb.CholeskyOuterProduct(validate_args=VALIDATE_ARGS)),
])
# sanity checks
with tf.Graph().as_default() as g:
  # A symmetric positive-definite test matrix.
  m = tf.constant([[1., 2.], [2., 8.]])
  m_fwd = precision_to_unconstrained.forward(m)
  m_inv = precision_to_unconstrained.inverse(m_fwd)
  # bijectors handle tensors of values, too!
  m2 = tf.stack([m, tf.eye(2)])
  m2_fwd = precision_to_unconstrained.forward(m2)
  m2_inv = precision_to_unconstrained.inverse(m2_fwd)
  g.finalize()
with tf.Session(graph=g) as sess:
  # Round-tripping forward then inverse should recover the original matrix.
  m_, m_fwd_, m_inv_ = sess.run([m, m_fwd, m_inv])
  print 'single input:'
  print 'm:\n', m_
  print 'precision_to_unconstrained(m):\n', m_fwd_
  print 'inverse(precision_to_unconstrained(m)):\n', m_inv_
  print
  m2_, m2_fwd_, m2_inv_ = sess.run([m2, m2_fwd, m2_inv])
  print 'tensor of inputs:'
  print 'm2:\n', m2_
  print 'precision_to_unconstrained(m2):\n', m2_fwd_
  print 'inverse(precision_to_unconstrained(m2)):\n', m2_inv_
```
The [`TransformedDistribution`](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/TransformedDistribution) class automates the process of applying a bijector to a distribution and making the necessary Jacobian correction to `log_prob()`. Our new prior becomes:
```
def log_lik_prior_transformed(transformed_precisions):
  """Log prior of unconstrained parameter vectors, Jacobian correction included.

  TransformedDistribution automatically applies the bijector's
  log-det-Jacobian correction to the base Wishart log_prob.

  Args:
    transformed_precisions: a [b, 3] tensor of unconstrained parameters.

  Returns:
    A length-b tensor of prior log probabilities.
  """
  base_prior = tfd.Wishart(
      df=PRIOR_DF,
      scale=PRIOR_SCALE,
      validate_args=VALIDATE_ARGS,
      allow_nan_stats=ALLOW_NAN_STATS)
  transformed_prior = tfd.TransformedDistribution(
      base_prior,
      bijector=precision_to_unconstrained,
      validate_args=VALIDATE_ARGS)
  return transformed_prior.log_prob(transformed_precisions)
# Check against the numpy implementation. Note that when comparing, we need
# to add in the Jacobian correction.
with tf.Graph().as_default() as g:
  precisions = np.stack([np.eye(2, dtype=np.float32), true_precision])
  transformed_precisions = precision_to_unconstrained.forward(precisions)
  lik_tf = log_lik_prior_transformed(transformed_precisions)
  # inverse_log_det_jacobian supplies the correction the numpy version lacks.
  corrections = precision_to_unconstrained.inverse_log_det_jacobian(
      transformed_precisions, event_ndims=1)
  n = precisions.shape[0]
  g.finalize()
with tf.Session(graph=g) as sess:
  lik_tf_, corrections_ = sess.run([lik_tf, corrections])
  for i in range(n):
    print i
    print 'numpy:', log_lik_prior_numpy(precisions[i]) + corrections_[i]
    print 'tensorflow:', lik_tf_[i]
```
We just need to invert the transform for our data log likelihood:
`precision = precision_to_unconstrained.inverse(transformed_precision)
`
Since we actually want the Cholesky factorization of the precision matrix, it would be more efficient to do just a partial inverse here. However, we'll leave optimization for later and will leave the partial inverse as an exercise for the reader.
```
def log_lik_data_transformed(transformed_precisions, replicated_data):
  """Data log likelihood as a function of the unconstrained parameters.

  We recover the precision matrix by inverting our bijector. This is
  inefficient since we really want the Cholesky decomposition of the
  precision matrix, and the bijector has that in hand during the inversion,
  but we'll worry about efficiency later.

  Args:
    transformed_precisions: a [b, 3] tensor of unconstrained parameters.
    replicated_data: observations tiled to [num_samples, b, 2].

  Returns:
    A length-b tensor of summed data log likelihoods.
  """
  batch_size = tf.shape(transformed_precisions)[0]
  precisions = precision_to_unconstrained.inverse(transformed_precisions)
  # Invert each precision (via Cholesky) to get the covariance the MVN needs.
  chol_factors = tf.cholesky(precisions)
  covariances = tf.cholesky_solve(
      chol_factors, tf.eye(2, batch_shape=[batch_size]))
  mvn = tfd.MultivariateNormalFullCovariance(
      loc=tf.zeros([batch_size, 2]),
      covariance_matrix=covariances,
      validate_args=VALIDATE_ARGS,
      allow_nan_stats=ALLOW_NAN_STATS)
  # Sum over the sample dimension, leaving one total per batch member.
  return tf.reduce_sum(mvn.log_prob(replicated_data), axis=0)
# sanity check
with tf.Graph().as_default() as g:
  precisions = np.stack([np.eye(2, dtype=np.float32), true_precision])
  transformed_precisions = precision_to_unconstrained.forward(precisions)
  lik_tf = log_lik_data_transformed(transformed_precisions, replicated_data)
  g.finalize()
with tf.Session(graph=g) as sess:
  lik_tf_ = sess.run(lik_tf)
  # The data term needs no Jacobian correction, so this should match the
  # untransformed numpy computation directly.
  for i in range(precisions.shape[0]):
    print i
    print 'numpy:', log_lik_data_numpy(precisions[i], my_data)
    print 'tensorflow:', lik_tf_[i]
```
Again we wrap our new functions in a closure.
```
def get_log_lik_transformed(data, n_chains=1):
  """Closure building the joint log likelihood in unconstrained space.

  Args:
    data: [num_samples, 2] array of observations, captured (pre-replicated)
      in the closure so the sampler only supplies the parameter.
    n_chains: number of MCMC chains, i.e. the batch dimension.

  Returns:
    A function mapping unconstrained parameter vectors to joint log
    likelihoods (data term plus Jacobian-corrected prior term).
  """
  tiled = np.tile(np.expand_dims(data, axis=1), reps=[1, n_chains, 1])
  def _log_lik_transformed(transformed_precisions):
    data_term = log_lik_data_transformed(transformed_precisions, tiled)
    prior_term = log_lik_prior_transformed(transformed_precisions)
    return data_term + prior_term
  return _log_lik_transformed
# make sure everything runs
with tf.Graph().as_default() as g:
log_lik_fn = get_log_lik_transformed(my_data)
m = tf.expand_dims(tf.eye(2), axis=0)
lik = log_lik_fn(precision_to_unconstrained.forward(m))
g.finalize()
with tf.Session(graph=g) as sess:
print sess.run(lik)
```
## Sampling
Now that we don't have to worry about our sampler blowing up because of invalid parameter values, let's generate some real samples.
The sampler works with the unconstrained version of our parameters, so we need to transform our initial value to its unconstrained version. The samples that we generate will also all be in their unconstrained form, so we need to transform them back. Bijectors are vectorized, so it's easy to do so.
```
# We'll choose a proper random initial value this time
np.random.seed(123)
initial_value_cholesky = np.array(
[[0.5 + np.random.uniform(), 0.0],
[-0.5 + np.random.uniform(), 0.5 + np.random.uniform()]],
dtype=np.float32)
initial_value = np.expand_dims(
initial_value_cholesky.dot(initial_value_cholesky.T), axis=0)
# The sampler works with unconstrained values, so we'll transform our initial
# value
with tf.Graph().as_default() as g:
initial_value_transformed = precision_to_unconstrained.forward(initial_value)
g.finalize()
with tf.Session(graph=g) as sess:
initial_value_transformed_ = sess.run(initial_value_transformed)
# Sample!
with tf.Graph().as_default() as g:
tf.set_random_seed(123)
log_lik_fn = get_log_lik_transformed(my_data, n_chains=1)
num_results = 1000
num_burnin_steps = 1000
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=[
initial_value_transformed_,
],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_lik_fn,
step_size=0.1,
num_leapfrog_steps=3,
seed=123),
parallel_iterations=1)
# transform samples back to their constrained form
precision_samples = precision_to_unconstrained.inverse(states)
g.finalize()
with tf.Session(graph=g) as sess:
states_, precision_samples_, kernel_results_ = sess.run(
[states, precision_samples, kernel_results])
```
Let's compare the mean of our sampler's output to the analytic posterior mean!
```
print 'True posterior mean:\n', posterior_mean
print 'Sample mean:\n', np.mean(np.reshape(precision_samples_, [-1, 2, 2]), axis=0)
```
We're way off! Let's figure out why. First let's look at our samples.
```
np.reshape(precision_samples_, [-1, 2, 2])
```
Uh oh - it looks like they all have the same value. Let's figure out why.
The `kernel_results_` variable is a named tuple that gives information about the sampler at each state. The `is_accepted` field is the key here.
```
# Look at the acceptance for the last 100 samples
print np.squeeze(kernel_results_.is_accepted)[-100:]
print 'Fraction of samples accepted:', np.mean(np.squeeze(kernel_results_.is_accepted))
```
All our samples were rejected! Presumably our step size was too big. I chose `step_size=0.1` purely arbitrarily.
# Version 3: sampling with an adaptive step size
Since sampling with my arbitrary choice of step size failed, we have a few agenda items:
1. implement an adaptive step size, and
2. perform some convergence checks.
There is some nice sample code in `tensorflow_probability/python/mcmc/hmc.py` for implementing adaptive step sizes. I've adapted it below.
Note that there's a separate `sess.run()` statement for each step. This is really helpful for debugging, since it allows us to easily add some per-step diagnostics if need be. For example, we can show incremental progress, time each step, etc.
**Tip:** One apparently common way to mess up your sampling is to have your graph grow in the loop. (The reason for finalizing the graph before the session is run is to prevent just such problems.) If you haven't been using finalize(), though, a useful debugging check if your code slows to a crawl is to print out the graph size at each step via `len(mygraph.get_operations())` - if the length increases, you're probably doing something bad.
We're going to run 3 independent chains here. Doing some comparisons between the chains will help us check for convergence.
```
# The number of chains is determined by the shape of the initial values.
# Here we'll generate 3 chains, so we'll need a tensor of 3 initial values.
N_CHAINS = 3
np.random.seed(123)
initial_values = []
for i in range(N_CHAINS):
initial_value_cholesky = np.array(
[[0.5 + np.random.uniform(), 0.0],
[-0.5 + np.random.uniform(), 0.5 + np.random.uniform()]],
dtype=np.float32)
initial_values.append(initial_value_cholesky.dot(initial_value_cholesky.T))
initial_values = np.stack(initial_values)
# Transform our initial values to their unconstrained form
# (Transforming the value in its own session is a workaround for b/72831017)
with tf.Graph().as_default() as g:
initial_values_transformed = precision_to_unconstrained.forward(
initial_values)
g.finalize()
with tf.Session(graph=g) as sess:
initial_values_transformed_ = sess.run(initial_values_transformed)
# Code adapted from tensorflow_probability/python/mcmc/hmc.py
with tf.Graph().as_default() as g:
tf.set_random_seed(123)
log_lik_fn = get_log_lik_transformed(my_data)
# Tuning acceptance rates:
dtype = np.float32
num_warmup_iter = 2500
num_chain_iter = 2500
# Set the target average acceptance ratio for the HMC as suggested by
# Beskos et al. (2013):
# https://projecteuclid.org/download/pdfview_1/euclid.bj/1383661192
target_accept_rate = 0.651
x = tf.get_variable(name='x', initializer=initial_values_transformed_)
step_size = tf.get_variable(name='step_size',
initializer=tf.constant(0.01, dtype=dtype))
# Initialize the HMC sampler.
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_lik_fn,
step_size=step_size,
num_leapfrog_steps=3)
# One iteration of the HMC
next_x, other_results = hmc.one_step(
current_state=x,
previous_kernel_results=hmc.bootstrap_results(x))
x_update = x.assign(next_x)
precision = precision_to_unconstrained.inverse(x_update)
# Adapt the step size using standard adaptive MCMC procedure. See Section 4.2
# of Andrieu and Thoms (2008):
# http://www4.ncsu.edu/~rsmith/MA797V_S12/Andrieu08_AdaptiveMCMC_Tutorial.pdf
# NOTE: One important change we need to make from the hmc.py version is to
# combine the log_accept_ratio values from the different chains when
# deciding how to update the step size. Here we use the mean
# log_accept_ratio to decide.
step_size_update = step_size.assign_add(
step_size * tf.where(
tf.exp(tf.minimum(tf.reduce_mean(
other_results.log_accept_ratio), 0.)) >
target_accept_rate,
x=0.1, y=-0.1))
# Note, the adaptations are performed during warmup only.
warmup = tf.group([x_update, step_size_update])
init = tf.global_variables_initializer()
g.finalize()
with tf.Session(graph=g) as sess:
# Initialize variables
sess.run(init)
# Warm up the sampler and adapt the step size
print 'Warmup'
start_time = time.time()
for i in range(num_warmup_iter):
sess.run(warmup)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_warmup_iter
print 'Step size: %g' % sess.run(step_size)
# Collect samples without adapting step size
print 'Sampling'
start_time = time.time()
packed_samples = np.zeros([num_chain_iter, N_CHAINS, 3])
precision_samples = np.zeros([num_chain_iter, N_CHAINS, 2, 2])
results = []
for i in range(num_chain_iter):
_, x_, precision_, other_results_ = sess.run(
[x_update, x, precision, other_results])
packed_samples[i, :] = x_
precision_samples[i, :] = precision_
results.append(other_results_)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_chain_iter
```
A quick check: our acceptance rate during our sampling is close to our target of 0.651.
```
is_accepted = np.array([r.is_accepted for r in results])
print np.mean(is_accepted)
precision_samples_reshaped = np.reshape(precision_samples, [-1, 2, 2])
```
Even better, our sample mean and standard deviation are close to what we expect from the analytic solution.
```
print 'True posterior mean:\n', posterior_mean
print 'Mean of samples:\n', np.mean(precision_samples_reshaped, axis=0)
print 'True posterior standard deviation:\n', posterior_sd
print 'Standard deviation of samples:\n', np.std(precision_samples_reshaped, axis=0)
```
## Checking for convergence
In general we won't have an analytic solution to check against, so we'll need to make sure the sampler has converged. One standard check is the Gelman-Rubin $\hat{R}$ statistic, which requires multiple sampling chains. $\hat{R}$ measures the degree to which variance (of the means) between chains exceeds what one would expect if the chains were identically distributed. Values of $\hat{R}$ close to 1 are used to indicate approximate convergence. See [the source](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/mcmc/diagnostic.py#L205) for details.
```
with tf.Graph().as_default() as g:
r_hat = tfp.mcmc.potential_scale_reduction(precision_samples)
g.finalize()
with tf.Session(graph=g) as sess:
print sess.run(r_hat)
```
## Model criticism
If we didn't have an analytic solution, this would be the time to do some real model criticism.
Here are a few quick histograms of the sample components relative to our ground truth (in red). Note that the samples have been shrunk from the sample precision matrix values toward the identity matrix prior.
```
fig, axes = plt.subplots(2, 2, sharey=True)
fig.set_size_inches(8, 8)
for i in range(2):
for j in range(2):
ax = axes[i, j]
ax.hist(precision_samples_reshaped[:, i, j])
ax.axvline(true_precision[i, j], color='red',
label='True precision')
ax.axvline(sample_precision[i, j], color='red', linestyle=':',
label='Sample precision')
ax.set_title('precision[%d, %d]' % (i, j))
plt.tight_layout()
plt.legend()
plt.show()
```
Some scatterplots of pairs of precision components show that because of the correlation structure of the posterior, the true posterior values are not as unlikely as they appear from the marginals above.
```
fig, axes = plt.subplots(4, 4)
fig.set_size_inches(12, 12)
for i1 in range(2):
for j1 in range(2):
index1 = 2 * i1 + j1
for i2 in range(2):
for j2 in range(2):
index2 = 2 * i2 + j2
ax = axes[index1, index2]
ax.scatter(precision_samples_reshaped[:, i1, j1],
precision_samples_reshaped[:, i2, j2], alpha=0.1)
ax.axvline(true_precision[i1, j1], color='red')
ax.axhline(true_precision[i2, j2], color='red')
ax.axvline(sample_precision[i1, j1], color='red', linestyle=':')
ax.axhline(sample_precision[i2, j2], color='red', linestyle=':')
ax.set_title('(%d, %d) vs (%d, %d)' % (i1, j1, i2, j2))
plt.tight_layout()
plt.show()
```
# Version 4: simpler sampling of constrained parameters
Bijectors made sampling the precision matrix straightforward, but there was a fair amount of manual converting to and from the unconstrained representation. There is an easier way!
### The TransformedTransitionKernel
The `TransformedTransitionKernel` simplifies this process. It wraps your sampler and handles all the conversions. It takes as an argument a list of bijectors that map unconstrained parameter values to constrained ones. So here we need the inverse of the `precision_to_unconstrained` bijector we used above. We could just use `tfb.Invert(precision_to_unconstrained)`, but that would involve taking inverses of inverses (TensorFlow isn't smart enough to simplify `tfb.Invert(tfb.Invert())` to `tfb.Identity()`), so instead we'll just write a new bijector.
### Constraining bijector
```
# The bijector we need for the TransformedTransitionKernel is the inverse of
# the one we used above.  Note that tfb.Chain applies its bijectors in
# last-to-first order, so the numbered steps below run bottom-up.
unconstrained_to_precision = tfb.Chain([
    # step 3: take the product of Cholesky factors
    tfb.CholeskyOuterProduct(validate_args=VALIDATE_ARGS),
    # step 2: exponentiate the diagonals
    tfb.TransformDiagonal(tfb.Exp(validate_args=VALIDATE_ARGS)),
    # step 1: map a vector to a lower triangular matrix
    tfb.FillTriangular(validate_args=VALIDATE_ARGS),
])
# quick sanity check
with tf.Graph().as_default() as g:
m = tf.constant([[1., 2.], [2., 8.]])
m_inv = unconstrained_to_precision.inverse(m)
m_fwd = unconstrained_to_precision.forward(m_inv)
g.finalize()
with tf.Session(graph=g) as sess:
m_, m_inv_, m_fwd_ = sess.run([m, m_inv, m_fwd])
print 'm:\n', m_
print 'unconstrained_to_precision.inverse(m):\n', m_inv_
print 'forward(unconstrained_to_precision.inverse(m)):\n', m_fwd_
```
## Sampling with the TransformedTransitionKernel
With the `TransformedTransitionKernel`, we no longer have to do manual transformations of our parameters. Our initial values and our samples are all precision matrices; we just have to pass in our unconstraining bijector(s) to the kernel and it takes care of all the transformations.
```
# Code adapted from tensorflow_probability/python/mcmc/hmc.py
with tf.Graph().as_default() as g:
tf.set_random_seed(123)
log_lik_fn = get_log_lik(my_data)
# Tuning acceptance rates:
dtype = np.float32
num_warmup_iter = 2500
num_chain_iter = 2500
# Set the target average acceptance ratio for the HMC as suggested by
# Beskos et al. (2013):
# https://projecteuclid.org/download/pdfview_1/euclid.bj/1383661192
target_accept_rate = 0.651
x = tf.get_variable(name='x', initializer=initial_values)
step_size = tf.get_variable(name='step_size', initializer=tf.constant(0.01, dtype=dtype))
# Initialize the HMC sampler, now wrapped in the TransformedTransitionKernel
ttk = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_lik_fn,
step_size=step_size,
num_leapfrog_steps=3),
bijector=[unconstrained_to_precision])
# One iteration
next_x, other_results = ttk.one_step(
current_state=x,
previous_kernel_results=ttk.bootstrap_results(x))
x_update = x.assign(next_x)
# Adapt the step size using standard adaptive MCMC procedure. See Section 4.2
# of Andrieu and Thoms (2008):
# http://www4.ncsu.edu/~rsmith/MA797V_S12/Andrieu08_AdaptiveMCMC_Tutorial.pdf
# NOTE: one change from above is that we have to look at
# other_results.inner_results.log_accept_ratio, since the new kernel
# wraps the results from the HMC kernel.
step_size_update = step_size.assign_add(
step_size * tf.where(
tf.exp(tf.minimum(tf.reduce_mean(
other_results.inner_results.log_accept_ratio), 0.)) >
target_accept_rate,
x=0.1, y=-0.1))
# Note, the adaptations are performed during warmup only.
warmup = tf.group([x_update, step_size_update])
init = tf.global_variables_initializer()
g.finalize()
with tf.Session(graph=g) as sess:
# Initialize
sess.run(init)
# Warm up the sampler and adapt the step size
print 'Warmup'
start_time = time.time()
for i in range(num_warmup_iter):
sess.run(warmup)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_warmup_iter
print 'Step size: %g' % sess.run(step_size)
# Collect samples without adapting step size
print 'Sampling'
start_time = time.time()
precision_samples = np.zeros([num_chain_iter, N_CHAINS, 2, 2])
results = []
for i in range(num_chain_iter):
_, x_, other_results_ = sess.run([x_update, x, other_results])
precision_samples[i, :] = x_
results.append(other_results_)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_chain_iter
```
### Checking convergence
The $\hat{R}$ convergence check looks good!
```
with tf.Graph().as_default() as g:
r_hat = tfp.mcmc.potential_scale_reduction(precision_samples)
g.finalize()
with tf.Session(graph=g) as sess:
print sess.run(r_hat)
```
### Comparison against the analytic posterior
Again let's check against the analytic posterior.
```
# The output samples have shape [n_steps, n_chains, 2, 2]
# Flatten them to [n_steps * n_chains, 2, 2] via reshape:
precision_samples_reshaped = np.reshape(precision_samples, [-1, 2, 2])
print 'True posterior mean:\n', posterior_mean
print 'Mean of samples:\n', np.mean(precision_samples_reshaped, axis=0)
print 'True posterior standard deviation:\n', posterior_sd
print 'Standard deviation of samples:\n', np.std(precision_samples_reshaped, axis=0)
```
# Optimizations
Now that we've got things running end-to-end, let's do a more optimized version. Speed doesn't matter too much for this example, but once matrices get larger, a few optimizations will make a big difference.
One big speed improvement we can make is to reparameterize in terms of the Cholesky decomposition. The reason is our data likelihood function requires both the covariance and the precision matrices. Matrix inversion is expensive ($O(n^3)$ for an $n \times n$ matrix), and if we parameterize in terms of either the covariance or the precision matrix, we need to do an inversion to get the other.
As a reminder, a real, positive-definite, symmetric matrix $M$ can be decomposed into a product of the form $M = L L^T$ where the matrix $L$ is lower triangular and has positive diagonals. Given the Cholesky decomposition of $M$, we can more efficiently obtain both $M$ (the product of a lower and an upper triangular matrix) and $M^{-1}$ (via back-substitution). The Cholesky factorization itself is not cheap to compute, but if we parameterize in terms of Cholesky factors, we only need to compute the Cholesky factorization of the initial parameter values.
## Using the Cholesky decomposition of the covariance matrix
TFP has a version of the multivariate normal distribution, [MultivariateNormalTriL](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/MultivariateNormalTriL), that is parameterized in terms of the Cholesky factor of the covariance matrix. So if we were to parameterize in terms of the Cholesky factor of the covariance matrix, we could compute the data log likelihood efficiently. The challenge is in computing the prior log likelihood with similar efficiency.
If we had a version of the inverse Wishart distribution that worked with Cholesky factors of samples, we'd be all set. Alas, we don't. (The team would welcome code submissions, though!) As an alternative, we can use a version of the Wishart distribution that works with Cholesky factors of samples together with a chain of bijectors.
At the moment, we're missing a few stock bijectors to make things really efficient, but I want to show the process as an exercise and a useful illustration of the power of TFP's bijectors.
### A Wishart distribution that operates on Cholesky factors
The `Wishart` distribution has a useful flag, `input_output_cholesky`, that specifies that the input and output matrices should be Cholesky factors. It's more efficient and numerically advantageous to work with the Cholesky factors than full matrices, which is why this is desirable. An important point about the semantics of the flag: it's only an indication that the representation of the input and output to the distribution should change - it does *not* indicate a full reparameterization of the distribution, which would involve a Jacobian correction to the `log_prob()` function. We actually want to do this full reparameterization, so we'll build our own distribution.
```
# An optimized Wishart distribution that has been transformed to operate on
# Cholesky factors instead of full matrices. Note that we gain a modest
# additional speedup by specifying the Cholesky factor of the scale matrix
# (i.e. by passing in the scale_tril parameter instead of scale).
class CholeskyWishart(tfd.TransformedDistribution):
  """Wishart distribution reparameterized to use Cholesky factors."""
  def __init__(self,
               df,
               scale_tril,
               validate_args=False,
               allow_nan_stats=True,
               name='CholeskyWishart'):
    """Builds the reparameterized Wishart.

    Args:
      df: degrees-of-freedom parameter of the underlying Wishart.
      scale_tril: Cholesky factor of the Wishart scale matrix.
      validate_args: whether to add runtime argument-validation ops.
      allow_nan_stats: whether statistics may return NaN instead of raising.
      name: name for the distribution.
    """
    # Wishart has a bunch of methods that we want to support but not
    # implement. We'll subclass TransformedDistribution here to take care of
    # those. We'll override the few for which speed is critical and implement
    # them with a separate Wishart for which input_output_cholesky=True
    super(CholeskyWishart, self).__init__(
        distribution=tfd.Wishart(
            df=df,
            scale_tril=scale_tril,
            input_output_cholesky=False,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats),
        bijector=tfb.Invert(tfb.CholeskyOuterProduct()),
        validate_args=validate_args,
        name=name
    )
    # Here's the Cholesky distribution we'll use for log_prob() and sample()
    self.cholesky = tfd.Wishart(
        df=df,
        scale_tril=scale_tril,
        input_output_cholesky=True,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats)
  def _log_prob(self, x):
    """Log prob of a Cholesky factor x: the input_output_cholesky Wishart's
    log_prob plus the Jacobian correction for the full reparameterization."""
    return (self.cholesky.log_prob(x) +
            self.bijector.inverse_log_det_jacobian(x, event_ndims=2))
  def _sample_n(self, n, seed=None):
    """Samples n Cholesky factors by delegating to the faster
    input_output_cholesky=True Wishart."""
    return self.cholesky._sample_n(n, seed)
# some checks
PRIOR_SCALE_CHOLESKY = np.linalg.cholesky(PRIOR_SCALE)
with tf.Graph().as_default() as g:
w_transformed = tfd.TransformedDistribution(
tfd.Wishart(df=PRIOR_DF, scale_tril=PRIOR_SCALE_CHOLESKY),
bijector=tfb.Invert(tfb.CholeskyOuterProduct()))
w_optimized = CholeskyWishart(
df=PRIOR_DF, scale_tril=PRIOR_SCALE_CHOLESKY)
m = tf.placeholder(dtype=tf.float32)
log_prob_transformed = w_transformed.log_prob(m)
log_prob_optimized = w_optimized.log_prob(m)
g.finalize()
with tf.Session(graph=g) as sess:
for matrix in [np.eye(2, dtype=np.float32),
np.array([[1., 0.], [2., 8.]], dtype=np.float32)]:
log_prob_transformed_, log_prob_optimized_ = sess.run(
[log_prob_transformed, log_prob_optimized],
feed_dict={m: matrix})
print 'Transformed Wishart:', log_prob_transformed_
print 'Optimized Wishart', log_prob_optimized_
```
### Building an inverse Wishart distribution
We have our covariance matrix $C$ decomposed into $C = L L^T$ where $L$ is lower triangular and has a positive diagonal. We want to know the probability of $L$ given that $C \sim W^{-1}(\nu, V)$ where $W^{-1}$ is the inverse Wishart distribution.
The inverse Wishart distribution has the property that if $C \sim W^{-1}(\nu, V)$, then the precision matrix $C^{-1} \sim W(\nu, V^{-1})$. So we can get the probability of $L$ via a `TransformedDistribution` that takes as parameters the Wishart distribution and a bijector that maps the Cholesky factor of precision matrix to a Cholesky factor of its inverse.
A straightforward (but not super efficient) way to get from the Cholesky factor of $C^{-1}$ to $L$ is to invert the Cholesky factor by back-solving, then forming the covariance matrix from these inverted factors, and then doing a Cholesky factorization.
Let the Cholesky decomposition of $C^{-1} = M M^T$. $M$ is lower triangular, so we can invert it using the `MatrixInverseTriL` bijector.
Forming $C$ from $M^{-1}$ is a little tricky: $C = (M M^T)^{-1} = M^{-T}M^{-1} = M^{-T} (M^{-T})^T$. $M$ is lower triangular, so $M^{-1}$ will also be lower triangular, and $M^{-T}$ will be upper triangular. The `CholeskyOuterProduct()` bijector only works with lower triangular matrices, so we can't use it to form $C$ from $M^{-T}$. Our workaround is a chain of bijectors that permute the rows and columns of a matrix.
```
# Here's our permuting bijector:
def get_permuter():
  """Bijector that reverses both the row and the column order of a matrix.

  The permutation is an involution: applying it twice returns the original
  matrix, so the bijector's forward and inverse are the same map.
  """
  reversal = [1, 0]
  # tfb.Permute acts on the rightmost axis (the columns); sandwiching it
  # between two transposes makes it act on the rows instead.
  permute_cols = tfb.Permute(permutation=reversal)
  permute_rows = tfb.Chain([
      tfb.Transpose(rightmost_transposed_ndims=2),
      tfb.Permute(permutation=reversal),
      tfb.Transpose(rightmost_transposed_ndims=2),
  ])
  return tfb.Chain([permute_rows, permute_cols])
# Some sanity checks
with tf.Graph().as_default() as g:
m = np.array([[1., 0.], [2., 8.]], dtype=np.float32)
permuter = get_permuter()
p_fwd = permuter.forward(m)
p_fwd_fwd = permuter.forward(p_fwd)
g.finalize()
with tf.Session(graph=g) as sess:
print 'm =\n', m
print 'permuted = \n', sess.run(p_fwd)
print 'permuted^2 = \n', sess.run(p_fwd_fwd)
```
### Combining all the pieces
Our final bijector is now a big chain:
```
def get_wishart_cholesky_to_iw_cholesky():
  """Bijector mapping the Cholesky factor M of a precision matrix C^-1
  (with C^-1 = M M^T) to the Cholesky factor of the covariance matrix C."""
  steps = [
      # step 1: invert the lower-triangular Cholesky factor (see code below)
      tfb.MatrixInverseTriL(),
      # step 2: transpose the inverse
      tfb.Transpose(rightmost_transposed_ndims=2),
      # step 3: make things lower triangular
      get_permuter(),
      # step 4: form the covariance matrix from the inverted Cholesky factors
      tfb.CholeskyOuterProduct(),
      # step 5: undo our permutation (note that permuter.inverse = permuter.forward)
      get_permuter(),
      # step 6: get the Cholesky factor for the covariance matrix
      tfb.Invert(tfb.CholeskyOuterProduct()),
  ]
  # tfb.Chain applies its bijectors last-to-first, so reverse the step list.
  return tfb.Chain(list(reversed(steps)))
# verify that the bijector works
with tf.Graph().as_default() as g:
m = np.array([[1., 0.], [2., 8.]], dtype=np.float32)
c_inv = m.dot(m.T)
c = np.linalg.inv(c_inv)
c_chol = np.linalg.cholesky(c)
wishart_cholesky_to_iw_cholesky = get_wishart_cholesky_to_iw_cholesky()
w_fwd = wishart_cholesky_to_iw_cholesky.forward(m)
g.finalize()
with tf.Session(graph=g) as sess:
print 'numpy =\n', c_chol
print 'bijector =\n', sess.run(w_fwd)
```
### Our final distribution
Our inverse Wishart operating on Cholesky factors is as follows:
```
inverse_wishart_cholesky = tfd.TransformedDistribution(
distribution=CholeskyWishart(
df=PRIOR_DF,
scale_tril=np.linalg.cholesky(np.linalg.inv(PRIOR_SCALE))),
bijector=get_wishart_cholesky_to_iw_cholesky())
```
We've got our inverse Wishart, but it's kind of slow because we have to do a Cholesky decomposition in the bijector. Let's return to the precision matrix parameterization and see what we can do there for optimization.
# Final(!) Version: using the Cholesky decomposition of the precision matrix
An alternative approach is to work with Cholesky factors of the precision matrix. Here the prior likelihood function is easy to compute, but the data log likelihood function takes more work since TFP doesn't have a version of the multivariate normal that is parameterized by precision.
### Optimized prior log likelihood
We use the `CholeskyWishart` distribution we built above to construct the prior.
```
# Our new prior.
PRIOR_SCALE_CHOLESKY = np.linalg.cholesky(PRIOR_SCALE)
def log_lik_prior_cholesky(precisions_cholesky):
  """Prior log likelihood of Cholesky factors of precision matrices,
  under the reparameterized CholeskyWishart prior."""
  return CholeskyWishart(
      df=PRIOR_DF,
      scale_tril=PRIOR_SCALE_CHOLESKY,
      validate_args=VALIDATE_ARGS,
      allow_nan_stats=ALLOW_NAN_STATS).log_prob(precisions_cholesky)
# Check against the slower TF implementation and the NumPy implementation.
# Note that when comparing to NumPy, we need to add in the Jacobian correction.
with tf.Graph().as_default() as g:
precisions = [np.eye(2, dtype=np.float32),
true_precision]
precisions_cholesky = np.stack([np.linalg.cholesky(m) for m in precisions])
precisions = np.stack(precisions)
lik_tf = log_lik_prior_cholesky(precisions_cholesky)
lik_tf_slow = tfd.TransformedDistribution(
distribution=tfd.Wishart(df=PRIOR_DF, scale=PRIOR_SCALE),
bijector=tfb.Invert(tfb.CholeskyOuterProduct())).log_prob(
precisions_cholesky)
corrections = tfb.Invert(tfb.CholeskyOuterProduct()).inverse_log_det_jacobian(
precisions_cholesky, event_ndims=2)
n = precisions.shape[0]
g.finalize()
with tf.Session(graph=g) as sess:
lik_tf_, lik_tf_slow_, corrections_ = sess.run(
[lik_tf, lik_tf_slow, corrections])
for i in range(n):
print i
print 'numpy:', log_lik_prior_numpy(precisions[i]) + corrections_[i]
print 'tensorflow slow:', lik_tf_slow_[i]
print 'tensorflow fast:', lik_tf_[i]
```
### Optimized data log likelihood
We can use TFP's bijectors to build our own version of the multivariate normal. Here is the key idea:
Suppose I have a column vector $X$ whose elements are iid samples of $N(0, 1)$. We have $\text{mean}(X) = 0$ and $\text{cov}(X) = I$
Now let $Y = A X + b$. We have $\text{mean}(Y) = b$ and $\text{cov}(Y) = A A^T$
Hence we can make vectors with mean $b$ and covariance $C$ using the affine transform $Ax+b$ to vectors of iid standard Normal samples provided $A A^T = C$. The Cholesky decomposition of $C$ has the desired property. However, there are other solutions.
Let $P = C^{-1}$ and let the Cholesky decomposition of $P$ be $B$, i.e. $B B^T = P$. Now
$P^{-1} = (B B^T)^{-1} = B^{-T} B^{-1} = B^{-T} (B^{-T})^T$
So another way to get our desired mean and covariance is to use the affine transform $Y=B^{-T}X + b$.
Our approach (courtesy of [this notebook](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Bayesian_Gaussian_Mixture_Model.ipynb)):
1. Use `tfd.Independent()` to combine a batch of 1-D `Normal` random variables into a single multi-dimensional random variable. The `reinterpreted_batch_ndims` parameter for `Independent()` specifies the number of batch dimensions that should be reinterpreted as event dimensions. In our case we create a 1-D batch of length 2 that we transform into a 1-D event of length 2, so `reinterpreted_batch_ndims=1`.
2. Apply a bijector to add the desired covariance: `tfb.Invert(tfb.Affine(scale_tril=precision_cholesky, adjoint=True))`. Note that above we're multiplying our iid normal random variables by the transpose of the inverse of the Cholesky factor of the precision matrix $(B^{-T}X)$. The `tfb.Invert` takes care of inverting $B$, and the `adjoint=True` flag performs the transpose.
3. Apply a bijector to add the desired offset: `tfb.Affine(shift=shift)` Note that we have to do the shift as a separate step from the initial inverted affine transform because otherwise the inverted scale is applied to the shift (since the inverse of $y=Ax+b$ is $x=A^{-1}y - A^{-1}b$).
```
class MVNPrecisionCholesky(tfd.TransformedDistribution):
  """Multivariate normal parametrized by loc and Cholesky precision matrix."""
  def __init__(self, loc, precision_cholesky, name=None):
    """Builds the distribution.

    Args:
      loc: mean of the distribution; its shape also fixes the event size.
      precision_cholesky: lower-triangular Cholesky factor B of the
        precision matrix (so the covariance is B^-T B^-1).
      name: optional name for the distribution.
    """
    super(MVNPrecisionCholesky, self).__init__(
        # Base distribution: iid standard normals with the same shape as loc.
        distribution=tfd.Independent(
            tfd.Normal(loc=tf.zeros_like(loc),
                       scale=tf.ones_like(loc)),
            reinterpreted_batch_ndims=1),
        # Chain applies last-to-first: first scale by B^-T (Invert handles
        # the inversion, adjoint=True the transpose), then shift by loc.
        # The shift must be a separate step so the inverted scale is not
        # applied to it.
        bijector=tfb.Chain([
            tfb.Affine(shift=loc),
            tfb.Invert(tfb.Affine(scale_tril=precision_cholesky,
                                  adjoint=True)),
        ]),
        name=name)
def log_lik_data_cholesky(precisions_cholesky, replicated_data):
  """Data log likelihood given Cholesky factors of precision matrices."""
  batch_size = tf.shape(precisions_cholesky)[0]  # number of precision matrices
  mvn = MVNPrecisionCholesky(
      loc=tf.zeros([batch_size, 2]),
      precision_cholesky=precisions_cholesky)
  per_point_log_probs = mvn.log_prob(replicated_data)
  # Sum over the data axis, leaving one total per precision matrix.
  return tf.reduce_sum(per_point_log_probs, axis=0)
# check against the numpy implementation
with tf.Graph().as_default() as g:
true_precision_cholesky = np.linalg.cholesky(true_precision)
precisions = [np.eye(2, dtype=np.float32), true_precision]
precisions_cholesky = np.stack([np.linalg.cholesky(m) for m in precisions])
precisions = np.stack(precisions)
n = precisions_cholesky.shape[0]
replicated_data = np.tile(np.expand_dims(my_data, axis=1), reps=[1, 2, 1])
lik_tf = log_lik_data_cholesky(precisions_cholesky, replicated_data)
g.finalize()
with tf.Session(graph=g) as sess:
lik_tf_ = sess.run(lik_tf)
for i in range(n):
print i
print 'numpy:', log_lik_data_numpy(precisions[i], my_data)
print 'tensorflow:', lik_tf_[i]
```
### Combined log likelihood function
Now we combine our prior and data log likelihood functions in a closure.
```
def get_log_lik_cholesky(data, n_chains=1):
  """Build a closure computing the total (data + prior) log likelihood for
  Cholesky factors of precision matrices.

  The data are captured in the closure, replicated once per chain, so the
  returned function takes only the Cholesky-factor parameters.
  """
  replicated_data = np.tile(
      np.expand_dims(data, axis=1), reps=[1, n_chains, 1])

  def _log_lik_cholesky(precisions_cholesky):
    data_term = log_lik_data_cholesky(precisions_cholesky, replicated_data)
    prior_term = log_lik_prior_cholesky(precisions_cholesky)
    return data_term + prior_term

  return _log_lik_cholesky
```
### Constraining bijector
Our samples are constrained to be valid Cholesky factors, which means they must be lower triangular matrices with positive diagonals. The `TransformedTransitionKernel` needs a bijector that maps unconstrained tensors to/from tensors with our desired constraints. We've removed the Cholesky decomposition from the bijector's inverse, which speeds things up.
```
unconstrained_to_precision_cholesky = tfb.Chain([
# step 2: exponentiate the diagonals
tfb.TransformDiagonal(tfb.Exp(validate_args=VALIDATE_ARGS)),
# step 1: expand the vector to a lower triangular matrix
tfb.FillTriangular(validate_args=VALIDATE_ARGS),
])
# some checks
with tf.Graph().as_default() as g:
inv = unconstrained_to_precision_cholesky.inverse(precisions_cholesky)
fwd = unconstrained_to_precision_cholesky.forward(inv)
g.finalize()
with tf.Session(graph=g) as sess:
inv_, fwd_ = sess.run([inv, fwd])
print 'precisions_cholesky:\n', precisions_cholesky
print '\ninv:\n', inv_
print '\nfwd(inv):\n', fwd_
```
### Initial values
We generate a tensor of initial values. We're working with Cholesky factors, so we generate some Cholesky factor initial values.
```
# The number of chains is determined by the shape of the initial values.
# Here we'll generate 3 chains, so we'll need a tensor of 3 initial values.
N_CHAINS = 3
np.random.seed(123)
initial_values_cholesky = []
for i in range(N_CHAINS):
initial_values_cholesky.append(np.array(
[[0.5 + np.random.uniform(), 0.0],
[-0.5 + np.random.uniform(), 0.5 + np.random.uniform()]],
dtype=np.float32))
initial_values_cholesky = np.stack(initial_values_cholesky)
```
### Sampling
We sample N_CHAINS chains using the `TransformedTransitionKernel`.
```
# Code adapted from tensorflow_probability/python/mcmc/hmc.py
with tf.Graph().as_default() as g:
tf.set_random_seed(123)
log_lik_fn = get_log_lik_cholesky(my_data)
# Tuning acceptance rates:
dtype = np.float32
num_warmup_iter = 2500
num_chain_iter = 2500
# Set the target average acceptance ratio for the HMC as suggested by
# Beskos et al. (2013):
# https://projecteuclid.org/download/pdfview_1/euclid.bj/1383661192
target_accept_rate = 0.651
x = tf.get_variable(name='x', initializer=initial_values_cholesky)
step_size = tf.get_variable(name='step_size',
initializer=tf.constant(0.01, dtype=dtype))
# Initialize the HMC sampler, now wrapped in the TransformedTransitionKernel
ttk = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_lik_fn,
step_size=step_size,
num_leapfrog_steps=3),
bijector=[unconstrained_to_precision_cholesky])
# One iteration
next_x, other_results = ttk.one_step(
current_state=x,
previous_kernel_results=ttk.bootstrap_results(x))
x_update = x.assign(next_x)
precision = tf.matmul(x, x, transpose_b=True)
# Adapt the step size using standard adaptive MCMC procedure. See Section 4.2
# of Andrieu and Thoms (2008):
# http://www4.ncsu.edu/~rsmith/MA797V_S12/Andrieu08_AdaptiveMCMC_Tutorial.pdf
# NOTE: one change from above is that we have to look at
# other_results.inner_results.log_accept_ratio, since the new kernel
# wraps the results from the HMC kernel.
step_size_update = step_size.assign_add(
step_size * tf.where(
tf.exp(tf.minimum(tf.reduce_mean(
other_results.inner_results.log_accept_ratio), 0.)) >
target_accept_rate,
x=0.1, y=-0.1))
# Note, the adaptations are performed during warmup only.
warmup = tf.group([x_update, step_size_update])
init = tf.global_variables_initializer()
g.finalize()
with tf.Session(graph=g) as sess:
# Initialize
sess.run(init)
# Warm up the sampler and adapt the step size
print 'Warmup'
start_time = time.time()
for i in range(num_warmup_iter):
sess.run(warmup)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_warmup_iter
print 'Step size: %g' % sess.run(step_size)
# Collect samples without adapting step size
print 'Sampling'
start_time = time.time()
precision_samples = np.zeros([num_chain_iter, N_CHAINS, 2, 2])
results = []
for i in range(num_chain_iter):
_, precision_, other_results_ = sess.run(
[x_update, precision, other_results])
precision_samples[i, :] = precision_
results.append(other_results_)
if i % 500 == 0:
print 'Step %d' % i
end_time = time.time()
print 'Time per step:', (end_time - start_time) / num_chain_iter
```
### Convergence check
A quick convergence check looks good:
```
with tf.Graph().as_default() as g:
r_hat = tfp.mcmc.potential_scale_reduction(precision_samples)
g.finalize()
with tf.Session(graph=g) as sess:
print 'r_hat:\n', sess.run(r_hat)
```
### Comparing results to the analytic posterior
```
# The output samples have shape [n_steps, n_chains, 2, 2]
# Flatten them to [n_steps * n_chains, 2, 2] via reshape:
precision_samples_reshaped = np.reshape(precision_samples, newshape=[-1, 2, 2])
```
And again, the sample means and standard deviations match those of the analytic posterior.
```
print 'True posterior mean:\n', posterior_mean
print 'Mean of samples:\n', np.mean(precision_samples_reshaped, axis=0)
print 'True posterior standard deviation:\n', posterior_sd
print 'Standard deviation of samples:\n', np.std(precision_samples_reshaped, axis=0)
```
Ok, all done! We've got our optimized sampler working.
| github_jupyter |
# Load Image
### This code loads train & valid/test images and converts it to data frame
```
import cv2
import numpy as np
import pandas as pd
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import matplotlib.pyplot as plt
def LoadImage(dirPath, trainFldr, validFldr, TrainCSVName, ValidCSVName):
    """Load training images named 'TrIm-NNNNN.png' and their labels.

    :param dirPath: root directory (Windows-style paths are assumed).
    :param trainFldr: sub-folder containing the training images.
    :param validFldr: sub-folder containing the validation images.
    :param TrainCSVName: CSV (label, image path) for the training set.
    :param ValidCSVName: CSV (label, image path) for the validation set.
    :return: (data, TrainLabel) where data is a float ndarray scaled to
        [0, 1] with shape (n, 150, 150, 3) and TrainLabel an ndarray of labels.

    NOTE(review): the validation CSV/labels are read but never returned —
    preserved for interface parity; confirm whether callers expect them.
    """
    # Train paths and labels
    TrainPath = dirPath + '\\' + trainFldr
    TrainCSVPath = dirPath + '\\' + TrainCSVName
    TrainCSV = pd.read_csv(TrainCSVPath, sep=',', names=["Label", "Image Path"])
    TrainLabel = np.array(TrainCSV.iloc[:, 0])
    # Valid/test paths and labels (loaded but unused, see NOTE above)
    ValidPath = dirPath + '\\' + validFldr
    ValidCSVPath = dirPath + '\\' + ValidCSVName
    ValidCSV = pd.read_csv(ValidCSVPath, sep=',', names=["Label", "Image Path"])
    ValidLabel = np.array(ValidCSV.iloc[:, 0])
    # Load, resize and collect the training images
    print('\n [INFO] loading images...')  # fixed typo: "laoding"
    data = []
    for i in range(len(TrainLabel)):
        j = format(i, '0>5')  # zero-pad index to 5 digits, e.g. '00042'
        imagePath = TrainPath + "\\" + "TrIm-" + j + ".png"
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (150, 150))
        data.append(img_to_array(image))
    # scale the raw pixel intensities to the range [0, 1]
    print('\n [INFO] scale the raw pixel...')
    data = np.array(data, dtype="float") / 255.0
    return data, TrainLabel
# Load Image
Path= 'C:\\Users\\Moris\\MURA Code\\Shoulder'
TrnFldr= 'Train'
VldFldr= 'Valid'
TrnNm= 'Code-train_labeled_studies.csv'
VldNm= 'Code-valid_labeled_studies.csv'
dt, TrnLabel= LoadImage(Path, TrnFldr, VldFldr, TrnNm, VldNm)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
print('\n [INFO] partition the data...')
(trainX, testX, trainY, testY) = train_test_split(dt,TrnLabel, test_size=0.25, random_state=42)
# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)
# construct the image generator for data augmentation
#aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
# height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
# horizontal_flip=True, fill_mode="nearest")
```
# Building Keras Model
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import ImageDataGenerator
input_shape= (150, 150, 3)
batch_size= 32
epochs= 5
classes = np.unique(trainY)
nClasses = len(classes)
model = Sequential()
#model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
#model.add(Dense(1))
#model.add(Activation('sigmoid'))
model.add(Dense(nClasses, activation='softmax'))
#model.compile(loss='binary_crossentropy',
# optimizer='rmsprop',
# metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow(trainX, trainY, batch_size=16)
test_generator = test_datagen.flow(testX, testY, batch_size=16)
model.fit_generator(
train_generator, steps_per_epoch= len(trainX) / batch_size,
epochs=epochs,
validation_data= test_generator,
validation_steps= len(testX) / batch_size)
model.save_weights('first_try.h5')
```
# References:
### 1-Image Classification with Keras and Deep Learning
##### LeNet is a small Convolutional Neural Network
https://www.pyimagesearch.com/2017/12/11/image-classification-with-keras-and-deep-learning/
### 2-Grayscale to RGB Conversion
##### 33% of Red, 33% of Green, 33% of Blue
##### New grayscale image = ( (0.3 * R) + (0.59 * G) + (0.11 * B) )
https://www.tutorialspoint.com/dip/grayscale_to_rgb_conversion.htm
### 3-Image Module
https://pillow.readthedocs.io/en/3.1.x/reference/Image.html
### 4-Applying Convolutional Neural Network on the MNIST dataset
https://yashk2810.github.io/Applying-Convolutional-Neural-Network-on-the-MNIST-dataset/
### 5-classifier_from_little_data_script_1.py
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d
### 6-How do I load train and test data from the local drive for a deep learning Keras model?
https://www.quora.com/How-do-I-load-train-and-test-data-from-the-local-drive-for-a-deep-learning-Keras-model
### 7-Image Preprocessing ImageDataGenerator class
https://keras.io/preprocessing/image/
### 8-Returning Multiple Values in Python
https://www.geeksforgeeks.org/g-fact-41-multiple-return-values-in-python/
### 9-Starter's Guide to building a CNN with keras (TF), openCV and google drive for image storage
https://github.com/chibuk/simple-cnn-keras-colaboratory/blob/master/Starter_s%20Guide%20to%20Convolutional%20Neural%20Networks%2C%20Part%201_%20Keras%20(TF)%20%2B%20OpenCV.ipynb
### 10-Create your first Image Recognition Classifier using CNN, Keras and Tensorflow backend
https://medium.com/nybles/create-your-first-image-recognition-classifier-using-cnn-keras-and-tensorflow-backend-6eaab98d14dd
### 11-Building powerful image classification models using very little data
https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
# Helping & Testing Code
```
from keras.preprocessing.image import img_to_array
import cv2
import matplotlib.pyplot as plt
xx= 'C:\\Users\\Moris\\MURA Code\\Shoulder\\Train\\TrIm-00011.png'
xx2= cv2.imread(xx)
xx2=cv2.resize(xx2, (120, 120))
xx2Arr=img_to_array(xx2)
#data.append(xx2)
#cv2.imshow('color_image',xx2)
imgplot = plt.imshow(xx2)
if(len(xx2.shape)<3):
print('gray')
elif len(xx2.shape)==3:
print('Color(RGB)')
else:
print('others')
#xx3= cv2.imread(xx, cv2.IMREAD_GRAYSCALE)
xx3= cv2.imread(xx, 0)
#xx3 = xx3[:,:,0]
imgplot = plt.imshow(xx3)
print(xx3)
from PIL import Image
def is_grey_scale(img_path):
    """Return True when every pixel of the image has R == G == B.

    BUGFIX: the original test `r != g != b` is a *chained* comparison
    (r != g AND g != b), so a pixel like (5, 5, 9) was wrongly treated as
    grey. Any pair of differing channels now counts as colour.
    """
    im = Image.open(img_path).convert('RGB')
    w, h = im.size
    for i in range(w):
        for j in range(h):
            r, g, b = im.getpixel((i, j))
            if not (r == g == b):
                return False
    return True
asd1= is_grey_scale("C:\\Users\\Moris\\MURA Code\\Shoulder\\Train\\TrIm-00011.png")
print(asd1)
xx4=cv2.resize(xx2, (120, 120))
xx4=img_to_array(xx4)
yy1=xx4[:, : , 0]
print(xx4.shape)
#print(xx4)
print(yy1)
print(yy1.shape)
yy2=yy1
for k1 in range(120):
for k2 in range(120):
d1= yy1[k1, k2]
yy2[k1, k2]= 0.3*d1 + 0.59*d1 + 0.11*d1
#imgplot = plt.imshow(yy1)
imgplot = plt.imshow(yy2)
xx4= xx4.ravel()
print(xx4)
mz1= Image.open(xx).convert("L")
mz2= np.asarray(mz1)
plt.imshow(mz2, cmap='gray')
plt.show()
print(mz2.shape)
plt.imshow(mz2)
mz3=mz1.resize( (120, 120), 0)
plt.imshow(mz3)
mz4= np.asarray(mz3)
print(mz4.shape)
testY
```
| github_jupyter |
# Python 容器使用的 5 个技巧和 2 个误区
https://yq.aliyun.com/articles/719141?spm=a2c4e.11155472.0.0.5c217f8fqXNF7H
## 避免频繁扩充列表/创建新列表
- 更多的使用 yield 关键字,返回生成器对象
- 尽量使用生成器表达式替代列表推导表达式
生成器表达式: (i for i in range(100))
列表推导表达式: [i for i in range(100)]
- 尽量使用模块提供的懒惰对象:
使用 re.finditer 替代 re.findall
直接使用可迭代的文件对象: for line in fp,而不是 for line in fp.readlines()
## 在列表头部操作多的场景使用 deque 模块
## 使用集合/字典来判断成员是否存在
## 面向容器接口编程
```
def add_ellipsis_gen(comments, max_length=12):
    """Yield each comment stripped of surrounding whitespace, truncating any
    comment longer than *max_length* and appending '...' to it."""
    for raw in comments:
        text = raw.strip()
        yield text[:max_length] + '...' if len(text) > max_length else text
comments = [
"Implementation note",
"Changed",
"ABC for generator",
]
print("\n".join(add_ellipsis_gen(comments)))
```
## 使用元组改善分支代码
```
# 低级写法
import time
def from_now(ts):
    """Return a human-readable description of how long ago the past Unix
    timestamp *ts* occurred (seconds / minutes / hours / days)."""
    delta = int(time.time() - ts)
    if delta >= 3600 * 24:
        return "{} days ago".format(delta // (3600 * 24))
    if delta >= 3600:
        return "{} hours ago".format(delta // 3600)
    if delta >= 60:
        return "{} minutes ago".format(delta // 60)
    if delta >= 1:
        return "{} seconds ago".format(delta)
    return "less than 1 second ago"
now = time.time()
print(from_now(now))
print(from_now(now - 24))
print(from_now(now - 600))
print(from_now(now - 7500))
print(from_now(now - 87500))
# 高级写法
import bisect
# BREAKPOINTS 必须是已经排好序的,不然无法进行二分查找
# BREAKPOINTS must stay sorted: bisect performs a binary search over it.
BREAKPOINTS = (1, 60, 3600, 3600 * 24)
TMPLS = (
    # unit, template
    (1, "less than 1 second ago"),
    (1, "{units} seconds ago"),
    (60, "{units} minutes ago"),
    (3600, "{units} hours ago"),
    (3600 * 24, "{units} days ago"),
)


def from_now(ts):
    """Return a human-readable description of how long ago the past Unix
    timestamp *ts* occurred, using a binary search over BREAKPOINTS."""
    delta = int(time.time() - ts)
    unit, template = TMPLS[bisect.bisect(BREAKPOINTS, delta)]
    return template.format(units=delta // unit)
```
## 在更多地方使用动态解包
Python 3.5 以后的版本,你可以直接用 ** 运算符来快速完成字典的合并操作
```
user = {**{"name": "piglei"}, **{"movies": ["Fight Club"]}}
```
## 最好不用“获取许可”,也无需“要求原谅”
```
# AF: Ask for Forgiveness
# Just do it; if an exception is raised, handle it afterwards.
def counter_af(l):
    """Count occurrences of each element in *l* (EAFP style).

    Attempts the increment first and handles KeyError for first-time keys,
    avoiding a second lookup per iteration.
    """
    result = {}
    for key in l:
        try:
            result[key] += 1
        except KeyError:
            result[key] = 1
    return result
# AP: Ask for Permission
# Check whether the operation is allowed before doing it.
def counter_ap(l):
    """Count occurrences of each element in *l* (LBYL style).

    Checks membership before updating, costing an extra lookup per key —
    kept as-is because it is the deliberate counterpoint to counter_af.
    """
    result = {}
    for key in l:
        if key in result:
            result[key] += 1
        else:
            result[key] = 1
    return result
```
整个 Python 社区对第一种 Ask for Forgiveness 的异常捕获式编程风格有着明显的偏爱。这其中有很多原因,首先,在 Python 中抛出异常是一个很轻量的操作。其次,第一种做法在性能上也要优于第二种,因为它不用在每次循环的时候都做一次额外的成员检查。
如果你想统计次数的话,直接用 collections.defaultdict 就可以了:
```
from collections import defaultdict
def counter_by_collections(l):
    """Tally element frequencies with a defaultdict — no permission check,
    no forgiveness handler needed."""
    counts = defaultdict(int)
    for item in l:
        counts[item] += 1
    return counts
```
这样的代码既不用“获取许可”,也无需“请求原谅”。整个代码的控制流变得更清晰自然了。所以,如果可能的话,请尽量想办法省略掉那些非核心的异常捕获逻辑。一些小提示:
- 操作字典成员时:使用 collections.defaultdict 类型
或者使用 dict[key]=dict.setdefault(key,0)+1 内建函数
- 如果移除字典成员,不关心是否存在:
- 调用 pop 函数时设置默认值,比如 dict.pop(key,None)
- 在字典获取成员时指定默认值: dict.get(key, default_value)
- 对列表进行不存在的切片访问不会抛出 IndexError 异常: "foo"
## 使用 next() 函数,查找第一个符合的元素
next() 是一个非常实用的内建函数,它接收一个迭代器作为参数,然后返回该迭代器的下一个元素。使用它配合生成器表达式,可以高效的实现“从列表中查找第一个满足条件的成员”之类的需求。
```
numbers = [3, 7, 8, 2, 21]
# 获取并 **立即返回** 列表里的第一个偶数
print(next(i for i in numbers if i % 2 == 0))
```
## 使用有序字典来去重
字典和集合的结构特点保证了它们的成员不会重复,所以它们经常被用来去重。但是,使用它们俩去重后的结果会**丢失原有列表的顺序**。这是由底层数据结构“哈希表(Hash Table)”的特点决定的。
如果**既需要去重又必须保留顺序**怎么办?我们可以使用 `collections.OrderedDict` 模块
Hint: 在 Python 3.6 中,默认的字典类型修改了实现方式,已经变成有序的了。并且在 Python 3.7 中,该功能已经从 语言的实现细节 变成了为 可依赖的正式语言特性。
但是我觉得让整个 Python 社区习惯这一点还需要一些时间,毕竟目前“字典是无序的”还是被印在无数本 Python 书上。所以,我仍然建议在一切需要有序字典的地方使用 OrderedDict。
# 常见误区
## 当心那些已经枯竭的迭代器
我们提到了使用“懒惰”生成器的种种好处。但是,所有事物都有它的两面性。生成器的最大的缺点之一就是:它会枯竭。当你完整遍历过它们后,之后的重复遍历就不能拿到任何新内容了。
```
numbers = [1, 2, 3]
numbers = (i * 2 for i in numbers)
# 第一次循环会输出 2, 4, 6
for number in numbers:
print(number)
# 这次循环什么都不会输出,因为迭代器已经枯竭了
for number in numbers:
print(number)
```
而且不光是生成器表达式,Python 3 里的 map、filter 内建函数也都有一样的特点。忽视这个特点很容易导致代码中出现一些难以察觉的 Bug。
## 别在循环体内修改被迭代对象
```
def remove_even(numbers):
    """Remove all even numbers from the list — DELIBERATELY BUGGY demo.

    This illustrates the pitfall described in the surrounding text: deleting
    from `numbers` while enumerating it shifts later elements left, so some
    elements are never visited. Do not fix — the bug is the point.
    """
    for i, number in enumerate(numbers):
        if number % 2 == 0:
            # buggy: mutating the list being iterated skips elements
            del numbers[i]
numbers = [1, 2, 7, 4, 8, 11]
remove_even(numbers)
print(numbers)
```
注意到结果里那个多出来的 “8” 了吗?当你在遍历一个列表的同时修改它,就会出现这样的事情。因为被迭代的对象 numbers 在循环过程中被修改了。遍历的下标在不断增长,而列表本身的长度同时又在不断缩减。这样就会导致列表里的一些成员其实根本就没有被遍历到。
所以对于这类操作,请使用一个新的空列表保存结果,或者利用 yield 返回一个生成器。而不是修改被迭代的列表或是字典对象本身。
# 判断Python版本
```
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
```
# 线程锁装饰器
```
def threadsafe_function(fcn):
    """Decorator making sure that the decorated function is thread safe.

    Calls are serialized through a shared RLock unless the caller passes
    threadsafe=False; that keyword is consumed and not forwarded to *fcn*.
    """
    lock = threading.RLock()

    def new(*args, **kwargs):
        """Lock and call the decorated function
        Unless kwargs['threadsafe'] == False
        """
        threadsafe = kwargs.pop('threadsafe', True)
        if threadsafe:
            lock.acquire()
        try:
            # The original `except Exception as excpt: raise excpt` was a
            # no-op re-raise that only cluttered the traceback; try/finally
            # alone guarantees the lock is released on every path.
            return fcn(*args, **kwargs)
        finally:
            if threadsafe:
                lock.release()
    return new
```
# 异常处理装饰器
使用轮子:pip install retry
```python
from retry import retry
@retry(tries=3, delay=1, )
def test():
for i in range(-2, 5):
print(1/i)
test()
```
自定义:
# 参数检查装饰器
```
import inspect
def check(fn):
    """Decorator that prints a warning when an argument's value does not
    match the corresponding parameter annotation, then forwards the call
    unchanged (it never blocks the call)."""
    def wrapper(*args, **kwargs):
        sig = inspect.signature(fn)
        params = sig.parameters
        values = list(params.values())
        # positional arguments: match annotation by position
        for i, p in enumerate(args):
            param = values[i]
            # use the public Parameter.empty sentinel consistently
            # (the keyword branch previously used the private inspect._empty)
            if param.annotation is not param.empty and not isinstance(
                    p, param.annotation):
                print(p, '!==', param.annotation)
        # keyword arguments: match annotation by name
        for k, v in kwargs.items():
            param = params[k]
            if param.annotation is not param.empty and not isinstance(
                    v, param.annotation):
                print(k, v, '!===', param.annotation)
        return fn(*args, **kwargs)
    return wrapper
@check
def add(x, y: int = 7) -> int:  # the second parameter is annotated int; the return is annotated int — mismatched args only trigger a printed warning
    return x + y
print(add(2, 1))
print(add(20, y=10))
print(add(y=100, x=200))
print(add("Yin", "zhengjie")) #我们在实际传参时故意不按照要求传参,发现会有相应的提示信息
# 异常捕获装饰器(亦可用于类方法)
import functools
import time
import traceback
def try_except_log(f=None, max_retries: int = 5, delay: (int, float) = 1, step: (int, float) = 0,
                   exceptions: (BaseException, tuple, list) = BaseException, sleep=time.sleep,
                   process=None, validate=None, callback=None, default=None):
    """Decorator that automatically retries a function when it raises.

    :param f: the function to wrap; allows use both as ``@try_except_log``
        and as ``@try_except_log(...)``.
    :param max_retries: maximum number of attempts.
    :param delay: delay between retries, in seconds.
    :param step: increment added to the delay after each retry, in seconds.
    :param exceptions: exception type(s) that trigger a retry.
    :param sleep: callable used to wait; defaults to time.sleep. Pass a
        non-blocking implementation in async frameworks such as tornado.
    :param process: called with the exception on each failure. Returning
        True stops retrying and returns ``default``. If *process* itself
        raises, retrying stops. NOTE(review): the original code then falls
        through and returns None rather than re-raising, despite what the
        old docstring claimed — that behavior is preserved here.
    :param validate: called with each successful result. A return value of
        False means "result invalid, retry"; anything else accepts it. If
        *validate* raises one of *exceptions*, the retry path handles it.
    :param callback: called with (exception, traceback string) once all
        retries are exhausted.
    :param default: value — or zero-argument factory — returned when
        giving up.
    :return: the wrapped function's result, or ``default``.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            attempt = 0
            func_exc, exc_traceback = None, None
            while attempt < max_retries:
                try:
                    result = func(*args, **kwargs)
                    if callable(validate) and validate(result) is False:
                        # BUGFIX: count this attempt before retrying. The
                        # original `continue` skipped the increment at the
                        # bottom of the loop, so a persistently-invalid
                        # result looped forever.
                        attempt += 1
                        sleep(delay + step * attempt)
                        continue
                    # normal completion, result accepted
                    return result
                except exceptions as ex:
                    func_exc, exc_traceback = ex, traceback.format_exc()
                    if callable(process):
                        try:
                            # process returning True means "handled": stop
                            # retrying and return the default value.
                            if process(ex) is True:
                                return default() if callable(default) else default
                        except Exception as e:
                            func_exc, exc_traceback = e, traceback.format_exc()
                            break  # process itself failed: stop retrying
                attempt += 1
                sleep(delay + step * attempt)
            else:
                # Loop exhausted without break: report and give up.
                if callable(callback):
                    callback(func_exc, exc_traceback)
                return default() if callable(default) else default
            # reached only via `break` (process raised): returns None,
            # matching the original control flow.
        return wrapper
    if callable(f):
        return decorator(f)
    return decorator
@try_except_log(default=1)
def p():
return 1/0
p()
```
# 使用goto语句
```sh
pip install goto-statement
```
具体的语法
```
!pip install goto-statement
from goto import with_goto
@with_goto
def range(start, stop):
    """Build the list [start, stop) using explicit goto labels.

    NOTE: shadows the builtin ``range`` — this is purely a demo of the
    third-party goto-statement package's label/goto syntax.
    """
    i = start
    result = []
    label .begin
    if i == stop:
        goto .end
    result.append(i)
    i += 1
    goto .begin
    label .end
    return result
```
# enumerate 读取文件
如果要统计文件的行数,可以这样写:
count = len(open(filepath, 'r').readlines())
这种方法简单,但是可能比较慢,当文件比较大时甚至不能工作。
可以利用enumerate():
```python
count = 0
for index, line in enumerate(open(filepath,'r')):
count += 1
```
# 展开列表flat
```
a = [[[26, 25, 24], [23, 22, 21, 20, 19], [18, 17, 16]], [[15, 14, 13, 12, 11], [10, 9, 8], [7, 6, 5], [4, 3, 2, 1, 0], [[99, 98, 97], [96, 95, 94, 93], [92, 91, 90, 89, 88], [87, 86, 85, 84, 83]]], [[[82, 81, 80], [79, 78, 77, 76], [75, 74, 73], [72, 71, 70], [69, 68, 67]], [[66, 65, 64, 63], [62, 61, 60], [59, 58, 57, 56], [55, 54, 53, 52, 51]], [[50, 49, 48, 47, 46], [45, 44, 43, 42, 41], [40, 39, 38, 37, 36], [35, 34, 33, 32], [31, 30, 29, 28, 27]]]]
# result = []
# def flat_list(a: list):
# for i in a:
# if isinstance(i, list):
# flat_list(i)
# else:
# result.append(i)
def flat_list(input_list: list):
    """Recursively flatten arbitrarily nested lists into a single flat list,
    preserving the left-to-right order of the leaves."""
    flattened = []
    for element in input_list:
        if isinstance(element, list):
            flattened.extend(flat_list(element))
        else:
            flattened.append(element)
    return flattened
print(flat_list(a))
```
# bytes数据
- bytes(iterable_of_ints) -> bytes
- bytes(string, encoding[, errors]) -> bytes
- bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer
- bytes(int) -> bytes object of size given by the parameter initialized with null bytes
- bytes() -> empty bytes object
```
a = bytes('中国', 'utf-8')
b = bytes('abc', 'ascii')
c = bytes(1)
d = bytes()
print("{}\n{}\n{}\n{}".format(a,b,c, d))
list(b)
b.hex()
bytes.fromhex(b.hex())
```
## 性能测试
```
a = '123abc我永远爱我的祖国'
print(a.encode() == bytes(a, 'utf-8'))
%timeit a.encode()
%timeit bytes(a, 'utf-8')
```
# bytearray
- bytearray(iterable_of_ints) -> bytearray
- bytearray(string, encoding[, errors]) -> bytearray
- bytearray(bytes_or_buffer) -> mutable copy of bytes_or_buffer
- bytearray(int) -> bytes array of size given by the parameter initialized with null bytes
- bytearray() -> empty bytes array
```
a = bytearray([1,2,3])
b = bytearray("你好", 'utf-8')
c = bytearray(bytes('abc', 'ascii'))
d = bytearray(1)
e = bytearray()
print("{}\n{}\n{}\n{}\n{}".format(a,b,c, d, e))
list(c)
c.hex()
bytearray.fromhex(c.hex())
```
# binascii
```
import binascii
a = '123abc我永远爱我的祖国'
b = a.encode()
%timeit b.hex()
%timeit binascii.hexlify(b)
b_hex_str = b.hex()
b_hex_str
binascii.unhexlify(b_hex_str).decode()
binascii.hexlify(b)
c = binascii.a2b_hex(b_hex_str)
c
```
# ascii & repr
```
ascii('123abc万')
repr("123abc万")
```
# 版本
```
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import Queue as queue
import SocketServer
else:
import queue
import socketserver as SocketServer
```
# 计算大小
```
import sys
def get_size(obj, seen=None):
    """Recursively finds size of objects.

    Follows dict keys/values, instance ``__dict__`` and iterable members
    (but not the characters of str/bytes/bytearray, whose storage
    sys.getsizeof already includes).

    :param obj: object to measure.
    :param seen: set of already-counted object ids (internal, for recursion).
    :return: total size in bytes.
    """
    size = sys.getsizeof(obj)
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        # Already counted — also guards against self-referential objects.
        return 0
    # Important mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)
    if isinstance(obj, dict):
        # generator arguments avoid building throwaway lists inside sum()
        size += sum(get_size(v, seen) for v in obj.values())
        size += sum(get_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        size += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj,
                                                     (str, bytes, bytearray)):
        size += sum(get_size(i, seen) for i in obj)
    return size
get_size(1), get_size(10000000000000000)
```
# 图像二进制,bytes转换 (np.uint8)
1. 从numpy.ndarray (图片)到字节流
```python
x = img.tobytes()
```
2. 从二进制文件到图片(numpy.ndarray)
```python
```
```
cd C:\Users\13438\Desktop\
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from io import BytesIO
import numpy as np
```
## 图片 转 三维ndarray
### 方法一: np.frombuffer/np.frombuffer + cv2.imdecode
```
with open('Image.jpg', 'rb') as f:
bin_str = f.read()
# nparr = np.frombuffer(bin_str, np.uint8) # 字符串
nparr = np.frombuffer(bin_str, np.uint8)
img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # cv2.CV_LOAD_IMAGE_COLOR in CV < 3.1
print(type(bin_str))
print(nparr.shape)
print(img_np.shape)
```
### 方法二: cv2.imread
```
img_np = cv2.imread('Image.jpg')
```
### 方法三 :plt.imread
```
with open('Image.jpg', 'rb') as f:
img_data = plt.imread(BytesIO(f.read()),"jpg")
print(type(img_data))
print(img_data.shape)
```
### 方法 四:PLT.Image
略
## 三维ndarray图 转 一维ndarray
encode 成一维,然后 tobytes()
```
# 构造图片
# img_data = np.linspace(0,255,100*100*3).reshape(100,100,-1).astype(np.uint8)
# 上面的图片转 ndarray
img_data = img_np
ret, buf = cv2.imencode(".jpg", img_data)
if ret:
print(img_data.shape, buf.shape)
```
## 一维转 bytes
接着上一步得到的一维数组
### 方法一:
需要转化
```
if ret:
img_bin1 = Image.fromarray(np.uint8(buf)).tobytes()
# 保存图片
with open('pic.jpg', 'wb') as f:
f.write(img_bin1)
```
### 方法二:
直接转 bytes()
```
img_bin2 = buf.tobytes()
# 保存。。。
with open('pic2.jpg', 'wb') as f:
f.write(img_bin2)
```
### 两种方法结果对比
方法二省去了中间转换步骤,效率提高了200~300倍
```
img_bin1 == img_bin2
%timeit img_bin1 = Image.fromarray(np.uint8(buf)).tobytes()
%timeit a = buf.tobytes()
```
## 三维转bytes
先转一维再保存为bytes
```
cd assets/
import cv2
from PIL import Image
from io import BytesIO
# 读一张数组到 三维数组
img_data = cv2.imread('lena.jpg')
# 转为一维
ret, buf = cv2.imencode(".jpg", img_data)
assert ret
print(img_data.shape, buf.shape)
# 展示
img = Image.open(BytesIO(buf.tobytes()))
img
img = Image.fromarray(cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB))
img
```
# 懒加载
```
class LazyImport:
    """Proxy that defers importing *module_name* until the first attribute
    access, then caches the module for subsequent lookups."""

    def __init__(self, module_name):
        self.module_name = module_name
        self.module = None

    def __getattr__(self, name):
        # __getattr__ only fires for attributes not found normally, so the
        # instance attributes set in __init__ never recurse into here.
        module = self.module
        if module is None:
            module = self.module = __import__(self.module_name)
        return getattr(module, name)
string = LazyImport("string")
print (string.digits)
```
# 创建文件夹
```
import errno
def mkdir_p_ex(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    :return: ``(True, None)`` on success — including when the directory
        already exists — or ``(False, error_message)`` on failure.
    """
    try:
        # exist_ok makes the call race-free: the original pre-checked with
        # os.path.exists/isdir (a TOCTOU window) before calling makedirs.
        os.makedirs(path, exist_ok=True)
        return True, None
    except OSError as exc:
        # exist_ok does not help when `path` exists but is NOT a directory;
        # keep the original errno check for that (and any other) OSError.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            return True, None
        return False, str(exc)
    except Exception as e:
        return False, str(e)
```
# 设置root日志等级
```
import logging
logging.root.setLevel(_CONFIG.get('INFO'))
```
# 通过UUID生成短随机ID
uuid-hex长度为32,此算法缩短为22
```
import string
import uuid
def base62(num, base=string.digits + string.ascii_letters, padding=1):
    """Convert a non-negative integer to base-62 (or custom-alphabet)
    representation, left-padded with ``base[0]`` to at least *padding* digits.

    :param num: non-negative integer to convert.
    :param base: alphabet of digit characters (default 62 chars: 0-9a-zA-Z).
    :param padding: minimum length of the result.
    """
    assert num >= 0
    # BUGFIX: the radix was hard-coded to 62, which broke (IndexError or
    # wrong digits) whenever a custom alphabet of a different size was passed.
    radix = len(base)
    digits = []
    while num:
        num, rest = divmod(num, radix)
        digits.append(base[rest])
    digits.extend(base[0] for _ in range(len(digits), padding))
    return ''.join(reversed(digits))
print(base62(uuid.uuid4().int, padding=22))
```
# 快速补全\_\_all__
借鉴 `types`模块 源码最后一行
```
__all__ = [i for i in globals() if i[:1] != '_']
```
# 动态函数设计思想
条件不同执行的操作不同时,且条件一般很少改变时,为了减少执行中的判断语句,可将该操作根据不同的条件执行的操作封装称一个方法,当条件改变时也将该函数重新赋值即可。
例:
```python
if condition1:
操作1
操作2
elif condition2:
操作3
else condition3:
操作4
```
为减少判断,可以改成以下方式:
```python
def run1():
操作1
操作2
def run2():
操作3
def run3():
操作4
# 初始执行函数(方法)
default_run = run1
def condition_change(cond):
global default_run
...
# 条件改变时,切换相应的执行函数(方法)
default_run = run2
while True:
# 不用判断,只管执行
default_run()
```
# 查看方法源码位置、行号
```
import functools
import inspect
def _get_function_source(func):
func = inspect.unwrap(func)
if inspect.isfunction(func):
code = func.__code__
return (code.co_filename, code.co_firstlineno)
if isinstance(func, functools.partial):
return _get_function_source(func.func)
if isinstance(func, functools.partialmethod):
return _get_function_source(func.func)
return None
def foo():
print('aaa')
print(_get_function_source(foo))
```
# 运行异步或者同步
```
from functools import partial
import asyncio
def iscoroutinefunction_or_partial(object):
    """Return True when *object* — or, for a functools.partial, the callable
    it wraps — is a coroutine function."""
    target = object.func if isinstance(object, partial) else object
    return asyncio.iscoroutinefunction(target)


def run_coroutine_or_function(func, *args, callback=None, **kwargs):
    """Run *func* with the given arguments.

    Coroutine functions are scheduled via asyncio.ensure_future, with
    *callback* attached as a done-callback when provided; plain functions
    are simply called synchronously."""
    if not iscoroutinefunction_or_partial(func):
        func(*args, **kwargs)
        return
    task = asyncio.ensure_future(func(*args, **kwargs))
    if callback is not None:
        task.add_done_callback(callback)
async def test():
await asyncio.sleep(1)
print('test')
run_coroutine_or_function(print, 'hello')
run_coroutine_or_function(test)
asyncio.get_event_loop().run_forever()
```
# Http 状态码判断
参考:`from httpx import codes`
```
def is_error(value: int) -> bool:
    """True for any HTTP error status code (4xx or 5xx)."""
    return is_client_error(value) or is_server_error(value)


def is_client_error(value: int) -> bool:
    """True for HTTP client-error status codes (400-499)."""
    return 400 <= value <= 499


def is_server_error(value: int) -> bool:
    """True for HTTP server-error status codes (500-599)."""
    return 500 <= value <= 599
```
# 限制调用栈
有时候,我们的某些函数可能要限制调用。例如函数 A 只能被函数 B、函数 C 调用,不能被其他函数调用。我们可以通过分析函数的调用栈来通过代码解决这个问题。查询调用栈,可以使用inspect模块的stack()函数。
[详细介绍](https://mp.weixin.qq.com/s/fHpQ4NNSKkhHrQ15JUieLg)
```
import inspect
def call_stack_check(valid_function_list=None):
    """Decorator factory restricting who may call the decorated function.

    The wrapper inspects the call stack and only proceeds when the immediate
    caller's function name appears in *valid_function_list*; otherwise it
    raises. With an empty/None list every caller is allowed."""
    def decorate(func):
        def wrap(*args, **kwargs):
            if valid_function_list:
                caller = inspect.stack()[1].function
                if caller not in valid_function_list:
                    raise Exception('非法调用!')
                print('允许执行后续代码')
            return func(*args, **kwargs)
        return wrap
    return decorate
@call_stack_check(['dance'])
def jump():
print('既然是跳舞,肯定要跳起来')
def dance():
print('开始跳舞')
jump()
def sing():
print('开始跳舞')
jump()
dance() # 允许
# sing() # 非法
```
# 打印错误栈
```
import traceback
def test2():
raise TypeError('abc')
def main():
test2()
try:
main()
except Exception as e:
for frame, lineno in traceback.walk_tb(e.__traceback__):
print(frame.f_code.co_name)
```
# 警告
```
import warnings
warnings.warn("xxx is deprecated since Python 3.8, and scheduled for removal in Python 3.10.",
DeprecationWarning, stacklevel=2)
```
| github_jupyter |
## Visualizing Earnings Based On College Majors
In this project we are going to visualize the salaries earned by professionals from popular majors. We will be working with a dataset on the job outcomes of students who graduated from college between 2010 and 2012. The dataset was released by the [American Community Survey](https://www.census.gov/programs-surveys/acs/); FiveThirtyEight cleaned the dataset and released it on their [GitHub repo](https://github.com/fivethirtyeight/data/tree/master/college-majors).
First we are going to import the libraries,read the file and print few of the rows in the dataset.
```
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
# Load the FiveThirtyEight "recent-grads" dataset (one row per college major).
recent_grads = pd.read_csv("recent-grads.csv")
# Show the first record to inspect the available columns.
print(recent_grads.iloc[0])
# Printing first five rows from the dataset.
recent_grads.head()
# Printing last five rows from the dataset.
recent_grads.tail()
# Printing summary statistics of the numeric columns.
recent_grads.describe()
# Printing the shape (rows, columns) of the dataset before cleaning.
raw_data_count = recent_grads.shape
raw_data_count
# Dropping the rows with missing values from the dataset.
recent_grads = recent_grads.dropna()
# Shape after cleaning, for comparison against raw_data_count.
cleaned_data_count = recent_grads.shape
cleaned_data_count
```
There was just one row with a missing value, which was dropped, leaving 172 rows and 21 columns.
```
# Scatter plots exploring how sample size relates to the outcome columns.
recent_grads.plot(x='Sample_size', y='Employed', kind='scatter')
# Same Employed vs. Sample_size scatter, now with an explicit title and size.
recent_grads.plot(x="Sample_size", y = "Employed",kind= "scatter",title = "Employed vs. Sample_size", figsize = (5,10))
recent_grads.plot(x = "Sample_size",y= "Median",kind = "scatter",title="Sample_size vs. Median",figsize = (5,10))
recent_grads.plot(x = "Sample_size",y="Unemployment_rate",kind = "scatter",title = "sample_size vs. unemployment_rate",figsize = (5,10))
# Histograms: sample size (restricted to the 0-5000 range) and median salary.
recent_grads["Sample_size"].hist(bins = 25,range = (0,5000))
recent_grads["Median"].hist()
```
From the plot we can see that the median salary range is between $ 30,000 and $ 40,000.
```
# Distributions of employment counts and of men/women counts per major.
recent_grads["Employed"].hist(bins = 25,range = (0,5000))
recent_grads["Men"].hist()
recent_grads['Women'].hist()
# Scatter matrices: pairwise scatter plots with per-column histograms on the diagonal.
from pandas.plotting import scatter_matrix
scatter_matrix(recent_grads[["Women","Men"]], figsize = (10,10))
# NOTE(review): this import duplicates the one just above; it is redundant.
from pandas.plotting import scatter_matrix
scatter_matrix(recent_grads[["Sample_size", "Median"]], figsize = (10,10))
scatter_matrix(recent_grads[["Sample_size", "Median","Unemployment_rate"]],figsize= (15,15))
# Bar charts of women's counts / share of women for the top majors in the table.
recent_grads[:5]["Women"].plot(kind = "bar")
recent_grads[:5].plot.bar(x= "Major",y="Women")
recent_grads[:10].plot.bar(x = 'Major', y ='ShareWomen')
```
From the first ten bar plots we can see that Astronomy and Astrophysics is the major with the largest share of women, followed by Actuarial Science.
```
recent_grads[163:].plot.bar(x = 'Major', y = 'Unemployment_rate')
```
While from the last ten rows we notice that Clinical Psychology is the major most heavily occupied by women.
```
# Derive the men's share as the complement of ShareWomen (the shares sum to 1).
recent_grads['men'] = 1 - recent_grads['ShareWomen']
# Grouped bar chart comparing women's vs. men's share for the first ten majors.
recent_grads[:10].plot.bar(x = 'Major', y = ['ShareWomen','men'])
```
As can be seen from the first ten rows, the majority of these majors are occupied by men.
```
# List all column names, then box-plot the distribution of median salaries.
recent_grads.columns
recent_grads['Median'].plot.box()
```
The above figure shows that the median salary is between 35,000 & 40,000 dollars.
```
recent_grads['Unemployment_rate'].plot.box()
```
## Hexbin plots
```
# Hexbin density plot of unemployment rate against sample size; gridsize
# controls the hexagon resolution.
# NOTE(review): the original cell issued this identical call twice, producing
# two duplicate figures — the redundant second call was removed.
recent_grads.plot.hexbin(x = 'Sample_size', y= 'Unemployment_rate', gridsize= 30)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.