text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
<a href="https://colab.research.google.com/github/IanTirok/Climate-change-tweets-analysis/blob/main/Climate_Change_Tweets_Classification_models.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## K-Nearest Neighbors (KNN) classifier
```
# --- Experiment tracking setup (Weights & Biases) ---
!pip install wandb
import wandb
wandb.login()
from wandb.keras import WandbCallback
# NOTE(review): wandb.config is read here before wandb.init(), so it is
# presumably empty at this point — confirm this is intended.
config = wandb.config
wandb.init(project="Climate change Tweet Classification",
notes="tweak baseline",
tags=["baseline", "Remote Learning"],
config=config)
# --- Data loading and label encoding ---
import pandas as pd
import numpy as np
from google.colab import drive
from sklearn.metrics import confusion_matrix,accuracy_score, classification_report
# drive.mount('/content/drive')  # files are loaded from the local Colab session instead
train_df = pd.read_csv('/content/Clean classified data_kwisha.csv')
test_df = pd.read_csv('/content/Test (2).csv')
# Inspect class balance before encoding.
train_df['class'].value_counts()
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
# Encode string class labels to integers; `le` is reused later to decode predictions.
train_df['class'] = le.fit_transform(train_df['class'])
# SPLITTING THE TRAINING DATASET INTO TRAINING AND VALIDATION
# Input: "clean_text" (pre-cleaned tweet text)
# Target: "class"
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_df["clean_text"],
train_df["class"],
test_size=0.05,
shuffle=True,
stratify =train_df["class"] )
# --- Tokenisation (only needed for word2vec-style models, not TF-IDF) ---
import nltk
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
X_train_tok= [nltk.word_tokenize(i) for i in X_train] #for word2vec
X_val_tok= [nltk.word_tokenize(i) for i in X_val] #for word2vec
# --- TF-IDF features ---
# Fit the vectorizer on the training split only, then transform the
# validation split with the same fitted vocabulary/IDF weights, so the
# validation data is represented purely in terms of what was learned
# from the training data (no leakage).
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(use_idf=True, )
X_train_vectors_tfidf = tfidf_vectorizer.fit_transform(X_train) #tfidf runs on raw (non-tokenized) sentences, unlike word2vec
# Only transform the validation data (never fit on it): fitting would
# rebuild the word indexes and IDF weights from validation text and make
# the two matrices incompatible.
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
test_df.head()
# # Replace classes that are related
# train_df["class"].replace({"famine": "drought", "water": "floods"}, inplace=True)
# Keras tokenizer fitted on train+test text, used only to estimate vocabulary size.
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_df['clean_text'])
tokenizer.fit_on_texts(test_df['clean_text'])
vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index
# NOTE(review): X and y below are never used by the KNN code that follows
# (the search fits on X_train_vectors_tfidf / y_train) — presumably leftovers.
X = X_train_vectors_tfidf
y = train_df['class']
# --- Hyper-parameter search space for K-Nearest Neighbors ---
leaf_size = list(range(7,20))
n_neighbors = list(range(3,10))
p = [1,2]
metric = ['manhattan', 'euclidean', 'minkowski']
# Collect the candidate values into a single parameter dictionary.
hyperparameters = dict(leaf_size = leaf_size, n_neighbors = n_neighbors, p=p,metric = metric)
# Randomized search (not exhaustive grid search) with 10-fold CV to find good parameters.
from sklearn.model_selection import RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
clf = RandomizedSearchCV(classifier, hyperparameters,cv=10)
clf = clf.fit(X_train_vectors_tfidf, y_train)
print('\n')
print(clf.best_params_)
print('\n')
# Refit a fresh KNN model with the best parameters found by the search.
modelone = KNeighborsClassifier(leaf_size = clf.best_params_['leaf_size'], n_neighbors = clf.best_params_['n_neighbors'], p = clf.best_params_['p'], metric = clf.best_params_['metric']).fit(X_train_vectors_tfidf, y_train)
modelone
# Predict on the held-out validation split.
y_pred = modelone.predict(X_val_vectors_tfidf)
# Side-by-side comparison of actual vs predicted labels.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics applied here to encoded
# class labels; their values are not meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('The F1 score is: ',metrics.f1_score(y_val, y_pred,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_pred,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
# --- Predict on the unlabelled test set with the trained KNN model ---
test_df
X_val = test_df['clean_text']
# Transform (never fit) the test text with the vectorizer fitted on training data.
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
# Predict and decode the integer labels back to the original class names.
y_pred = modelone.predict(X_val_vectors_tfidf)
y_pred = le.inverse_transform(y_pred)
# Attach the predicted class names to the test dataframe.
test_df['class'] = y_pred
# Save the labelled test set (requires a mounted Drive).
# test_df.to_csv('/content/drive/MyDrive/Module 2 groupwork Datasets/Test df with classes KNNclassifier.csv')
```
# Naive Bayes
```
# --- Naive Bayes: rebuild the train/validation split and TF-IDF features ---
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
# SPLITTING THE TRAINING DATASET INTO TRAINING AND VALIDATION
# Input: "clean_text" (pre-cleaned tweet text)
# Target: "class"
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_df["clean_text"],
train_df["class"],
test_size=0.05,
shuffle=True,
stratify =train_df["class"] )
# Tokenised copies (only needed for word2vec-style models, not TF-IDF).
import nltk
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
X_train_tok= [nltk.word_tokenize(i) for i in X_train] #for word2vec
X_val_tok= [nltk.word_tokenize(i) for i in X_val] #for word2vec
# --- TF-IDF features ---
# Fit the vectorizer on the training split only, then transform the
# validation split with the same fitted vocabulary/IDF weights (no leakage).
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(use_idf=True, )
X_train_vectors_tfidf = tfidf_vectorizer.fit_transform(X_train) #tfidf runs on raw (non-tokenized) sentences, unlike word2vec
# Only transform the validation data (never fit on it): fitting would
# rebuild the word indexes and IDF weights from validation text and make
# the two matrices incompatible.
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
```
Gaussian NB
```
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
# Training and fitting the Gaussian Naive Bayes model.
# GaussianNB requires dense input, hence the .toarray() on the sparse TF-IDF matrices.
gausnb = GaussianNB()
gausnb.fit(X_train_vectors_tfidf.toarray(),y_train)
y_pred = gausnb.predict(X_val_vectors_tfidf.toarray())
# Checking performance of our model with performance metrics.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics applied to encoded class
# labels; their values are not meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('The F1 score is: ',metrics.f1_score(y_val, y_pred,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_pred,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
```
Bernoulli NB
```
# Training and evaluating the Bernoulli Naive Bayes model on the TF-IDF features.
bernb = BernoulliNB()
bernb.fit(X_train_vectors_tfidf,y_train)
y_pred = bernb.predict(X_val_vectors_tfidf)
# Fix: rebuild the comparison frame from THIS model's predictions. The original
# line was commented out, so the Gaussian NB frame from the previous cell was
# printed again here.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics applied to encoded class
# labels; their values are not meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('The F1 score is: ',metrics.f1_score(y_val, y_pred,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_pred,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
```
Multinomial NB
```
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
# Candidate pipeline for a text-based grid search.
# Fix: the original pipeline contained only the 'clf' step, so the 'vect__'
# and 'tfidf__' keys in tuned_parameters referenced non-existent steps and
# would raise if this grid were ever passed to GridSearchCV.
text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB())])
tuned_parameters = {
'vect__ngram_range': [(1, 1), (1, 2), (2, 2)],
'tfidf__use_idf': (True, False),
'tfidf__norm': ('l1', 'l2'),
'clf__alpha': [1, 1e-1, 1e-2]}
# Training and fitting the Multinomial Naive Bayes model directly on the
# precomputed TF-IDF vectors (the pipeline above is kept for later tuning).
mnb = MultinomialNB()
mnb.fit( X_train_vectors_tfidf , y_train)
y_pred = mnb.predict(X_val_vectors_tfidf)
# Checking performance of our model with performance metrics.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics on encoded labels; not
# meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('The F1 score is: ',metrics.f1_score(y_val, y_pred,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_pred,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
```
Since the accuracy is 60.7%, we will try to improve the model by hyperparameter tuning.
Hyperparameter Tuning
```
# Tuning the Laplace-smoothing hyperparameter (alpha) for Multinomial NB
# with an exhaustive 5-fold grid search.
from sklearn.preprocessing import PowerTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
alphas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
p_grid_NB = {'alpha': alphas}
NB_cls= MultinomialNB()
grid = GridSearchCV(estimator = NB_cls, param_grid = p_grid_NB, cv = 5)
grid.fit(X_train_vectors_tfidf, y_train)
grid.best_params_
# Refit with the tuned hyperparameter.
# Fix: use the alpha actually selected by the grid search instead of a
# hard-coded 0.1, which silently ignored the search result.
mnb = MultinomialNB(alpha=grid.best_params_['alpha'])
mnb.fit( X_train_vectors_tfidf , y_train)
y_pred = mnb.predict(X_val_vectors_tfidf)
# Checking performance of our model with performance metrics.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics on encoded labels; not
# meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('The F1 score is: ',metrics.f1_score(y_val, y_pred,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_pred,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
```
# Random Forests classifier — climate change tweet classification
```
# --- Random Forest classifier on the same TF-IDF features ---
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
# Re-encode the class labels (idempotent if already integer-encoded upstream).
train_df['class'] = le.fit_transform(train_df['class'])
import pandas as pd
import numpy as np
from google.colab import drive
from sklearn.model_selection import GridSearchCV
# Fix: RandomizedSearchCV is used below but was never imported in this section
# (it only ran because an earlier cell happened to import it).
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import r2_score
#drive.mount('/content/drive')
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
# Split into training and validation (95/5), stratified on the class label.
# Input: "clean_text"; Target: "class".
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_df["clean_text"],
                                                  train_df["class"],
                                                  test_size=0.05,
                                                  shuffle=True,
                                                  stratify=train_df["class"])
# Tokenised copies (only needed for word2vec-style models, not TF-IDF).
import nltk
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
X_train_tok = [nltk.word_tokenize(i) for i in X_train]  # for word2vec
X_val_tok = [nltk.word_tokenize(i) for i in X_val]      # for word2vec
# TF-IDF: fit on the training split only, then transform the validation split
# with the same fitted vocabulary/IDF weights (no leakage).
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(use_idf=True)
X_train_vectors_tfidf = tfidf_vectorizer.fit_transform(X_train)
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
# Randomised search over forest size and depth, 5-fold CV.
parameters = {'n_estimators': np.arange(10,13),
              'max_depth': np.arange(7,17)}
grid_search = RandomizedSearchCV(RandomForestClassifier(), parameters, cv = 5, return_train_score = True)
grid_search.fit(X_train_vectors_tfidf, y_train)
print(grid_search.best_params_)
print('\n')
# Compare the sampled candidates against each other.
# Fix: iterate over the candidates the search actually evaluated (n_iter of
# them) instead of assuming the count equals len(parameters['max_depth']).
print('how do the different depths compare to each other''\n')
for i in range(len(grid_search.cv_results_['params'])):
    print('parameters', grid_search.cv_results_['params'][i])
    print('mean Test scores:', grid_search.cv_results_['mean_test_score'][i])
    print('Rank:', grid_search.cv_results_['rank_test_score'][i])
    print('\n')
# Fit a Random Forest with the best hyper-parameters found by the search.
dtree_model = RandomForestClassifier(n_estimators = grid_search.best_params_['n_estimators'],
                                     max_depth = grid_search.best_params_['max_depth']).fit(X_train_vectors_tfidf, y_train)
dtree_model
# Fix: predict on the *validation* features before scoring. The original code
# read y_pred before assigning it (so the stale predictions of the previous
# model were scored), then predicted on the TRAINING vectors, whose length
# does not match y_val.
y_pred = dtree_model.predict(X_val_vectors_tfidf)
print('Training score:', dtree_model.score(X_train_vectors_tfidf, y_train))
# NOTE(review): r2 on encoded class labels is of dubious value; kept for
# continuity with the original output.
print('Test score:', r2_score(y_val, y_pred))
# Side-by-side comparison of actual vs predicted labels.
comparison_frame = pd.DataFrame({'Actual': y_val, 'Predicted': y_pred})
print('\n')
print(comparison_frame)
print('\n')
print(comparison_frame.describe())
# --- Evaluating the algorithm ---
# NOTE(review): MAE/MSE/RMSE are regression metrics on encoded labels; not
# meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_pred))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_pred))
# Fix: the original printed the 'Classification report' header twice (with a
# duplicated accuracy/confusion block in between) but never printed the report.
print('\n', 'Classification report')
print(classification_report(y_val, y_pred))
# --- Predict on the unlabelled test set with the trained Random Forest ---
test_df
X_val = test_df['clean_text']
# Transform (never fit) the test text with the vectorizer fitted on training data,
# so the test data is represented only in terms of what was learned from training.
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
# Predict and decode the integer labels back to the original class names.
y_pred = dtree_model.predict(X_val_vectors_tfidf)
y_pred = le.inverse_transform(y_pred)
# Attach the predicted class names to the test dataframe.
test_df['class'] = y_pred
# Save the labelled test set.
# NOTE(review): drive.mount is commented out earlier in this section — this
# write will fail unless Google Drive has been mounted; confirm.
test_df.to_csv('/content/drive/MyDrive/Module 2 groupwork Datasets/Test df with classes RandomForestsclassifier.csv')
```
# Logistic Regression
```
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import r2_score
import sklearn
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
train_df = pd.read_csv('/content/Clean classified data_kwisha.csv')
test_df = pd.read_csv('Test (2).csv')
sns.pairplot(train_df, hue='class', size=2.5)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
# Encode string class labels to integers; `le` is reused later to decode predictions.
train_df['class'] = le.fit_transform(train_df['class'])
# Split into training and validation (95/5), stratified on the class label.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_df["clean_text"],
train_df["class"],
test_size=0.05,
shuffle=True,
stratify =train_df["class"] )
# Tokenised copies (only needed for word2vec-style models, not TF-IDF).
import nltk
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
X_train_tok= [nltk.word_tokenize(i) for i in X_train]
X_val_tok= [nltk.word_tokenize(i) for i in X_val]
# TF-IDF: fit the vectorizer on the training split only, then transform the
# validation split with the same fitted vocabulary/IDF weights.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(use_idf=True, )
X_train_vectors_tfidf = tfidf_vectorizer.fit_transform(X_train) #tfidf runs on raw (non-tokenized) sentences, unlike word2vec
# Only transform the validation data (never fit on it): fitting would rebuild
# the word indexes and IDF weights from validation text and make the matrices
# incompatible.
X_val_vectors_tfidf = tfidf_vectorizer.transform(X_val)
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
# Baseline logistic regression on TF-IDF features (liblinear solver, L2 penalty,
# weak regularisation via C=10).
lr_tfidf=LogisticRegression(solver = 'liblinear', C=10, penalty = 'l2')
lr_tfidf.fit(X_train_vectors_tfidf, y_train) #model
# Predict labels and class-1 probabilities for the validation split.
y_predict = lr_tfidf.predict(X_val_vectors_tfidf)
y_prob = lr_tfidf.predict_proba(X_val_vectors_tfidf)[:,1]
baselog_accuracy = accuracy_score(y_val, y_predict)
# NOTE(review): MAE/MSE/RMSE are regression metrics on encoded labels; not
# meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_predict))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_predict))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_predict)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_predict))
print('The F1 score is: ',metrics.f1_score(y_val, y_predict,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_predict,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_predict))
print('\n', 'Classification report')
print(classification_report(y_val, y_predict))
from six import StringIO
# Import the libraries
from sklearn.datasets import make_classification
from imblearn.over_sampling import SVMSMOTE
# Oversample the minority classes with SVM-SMOTE before refitting.
# Fixes: (1) use a dedicated name for the oversampler instead of clobbering the
# fitted LogisticRegression bound to `lr_tfidf`; (2) drop the dead
# make_classification call whose outputs were immediately overwritten by
# fit_resample; (3) drop the duplicated make_classification import.
svm_smote = SVMSMOTE(random_state = 101)
X_oversample_svm, y_oversample_svm = svm_smote.fit_resample(X_train_vectors_tfidf, y_train)
# Refit logistic regression on the balanced training data.
classifier_svm = LogisticRegression(solver = 'liblinear', C=10, penalty = 'l2')
classifier_svm.fit(X_oversample_svm, y_oversample_svm)
# Predict labels and class-1 probabilities for the validation split.
y_predict = classifier_svm.predict(X_val_vectors_tfidf)
y_prob = classifier_svm.predict_proba(X_val_vectors_tfidf)[:,1]
# NOTE(review): MAE/MSE/RMSE are regression metrics on encoded labels; not
# meaningful for classification.
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_val, y_predict))
print('Mean Squared Error:', metrics.mean_squared_error(y_val, y_predict))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_predict)))
print('The accuracy of the model is ',metrics.accuracy_score(y_val, y_predict))
print('The F1 score is: ',metrics.f1_score(y_val, y_predict,average='weighted'))
print('The recall score is ',metrics.recall_score(y_val, y_predict,average='weighted'))
print('\n', 'Confusion matrix')
print(confusion_matrix(y_val, y_predict))
print('\n', 'Classification report')
print(classification_report(y_val, y_predict))
#print(classification_report(y_test, classifier_svm.predict(X_test)))
accuracy_score(y_val, y_predict)
```
## Transformers
```
# --- Transformer (ktrain/DistilBERT) setup ---
! pip install ktrain
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import os
# Pin CUDA device enumeration and use GPU 0 only.
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID';
os.environ['CUDA_VISIBLE_DEVICES']='0';
import pandas as pd
import ktrain
from ktrain import text
df=pd.read_csv('/content/Clean classified data_kwisha.csv')
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
# Encode string class labels to integers.
df['class'] = le.fit_transform(df['class'])
data_texts = df["clean_text"].to_list() # Features (not tokenized yet)
data_labels = df["class"].to_list() # Labels
from sklearn.model_selection import train_test_split
# Split into train (80%) and held-out test (20%) with a fixed seed.
# train_texts, _texts, train_labels, val_labels = train_test_split(data_texts, data_labels, test_size=0.2, random_state=0)
X_train,X_test,y_train,y_test=train_test_split(data_texts, data_labels, test_size=0.2, random_state=0)
# # Keep some data for inference (testing)
# train_texts, test_texts, train_labels, test_labels = train_test_split(train_texts, train_labels, test_size=0.01, random_state=0)
# Sanity check on the split sizes.
len(X_test),len(X_train),len(y_test),len(y_train)
```
Build model with Transformer
```
# Build a classifier from a pretrained DistilBERT checkpoint via ktrain.
model_name='distilbert-base-uncased'
categories=['renewable','drought','floods','air_polutants','temperature','greentalk']
#transformer
trans=text.Transformer(model_name,maxlen=512,class_names=categories)
# Preprocess the train and validation/test data.
train_data=trans.preprocess_train(X_train,y_train)
# Fix: held-out data must go through preprocess_test, not preprocess_train,
# so it is prepared as evaluation data rather than training data.
test_data=trans.preprocess_test(X_test,y_test)
# Retrieve the DistilBERT classifier head.
model=trans.get_classifier()
# Wrap model and data in a ktrain learner.
learner=ktrain.get_learner(model,train_data=train_data,val_data=test_data,batch_size=16)
# Simulate training at increasing learning rates to pick a good one.
learner.lr_find(show_plot=True,max_epochs=3)
# Train one epoch with the 1cycle policy at the chosen learning rate.
learner.fit_onecycle(0.003,1)
# Report per-class validation metrics.
learner.validate(class_names=categories)
X_test[553]
```
Making predictions
```
# Build a predictor that accepts raw text.
# Fix: the keyword argument is `preproc`; the original `prepoc=` typo raises
# TypeError because ktrain.get_predictor has no such parameter.
predictor=ktrain.get_predictor(learner.model,preproc=trans)
x='Six bodies of the flash flood victims have been recovered, leaving one tourist missing. The search and rescue operation continues as we reach out to next of kin to share details of (this) sad incident and plan together (our) next course of action.”'
# Predict the class of a sample sentence.
predictor.predict(x)
from wandb.keras import WandbCallback
```
| github_jupyter |
```
##############################################################################
# Script used in the paper:
# Dune migration and volume change from airborne LiDAR, terrestrial LiDAR
# and Structure from Motion--Multi View Stereo
# by
# Carlos H. Grohmann et al - 2019/2020
# guano (at) usp (dot) br
# Institute of Energy and Environment - University of Sao Paulo
#
# Please check the GitHub repo for the final reference to the paper
##############################################################################
```
#### Monte Carlo analysis of random points
To compare the elevation of elevation datasets, a few key points must be considered beforehand, such as spatial resolution, presence of voids and number of samples used in the analysis. A direct comparison of the raster datasets on a pixel-by-pixel basis, is not the best approach, since differences in pixel size and presence of voids could affect correlation analysis, and the large number of samples would impact descriptive statistics, goodness-of-fit and error measurements.
A random sample of elevation values can overcome these issues, but raises the question of how many data points are needed to properly represent the original dataset. To answer this question, a Monte Carlo approach was devised in the following form:
- the model was run 50 times;
- the number of random points analyzed was n=50, 100, 250, 500, 1000, 2500, 5000 and 10000;
- in each run, *n* random points were created and elevation was extracted from SRTM;
- after 50 runs, correlation was calculated between the first and the 49 subsequent sets of *n* random points;
- a Four Parameter Logistic Regression (4PL) was calculated for the mean value of correlation of each set of *n* random points.
In order to ensure reproducibility of the analysis, the random seed used to generate points was set to the sequential number of each model run (0,1,2,3,...,49) multiplied by a constant. Results of this approach have shown that for the TLS survey area, 1000 random points can be used to describe the elevation of the dataset.
```
# 1 - Extract elevation for sets of randon points [50,100,250,500,1000,2000,5000,10000] and then
# calculate stats for each.
# MonteCarlo-like analysis:
# 0 - for a series of n random points [50,100,250,500,1000,2000,5000,10000]:
# 1 - get X sets of n random points (rand_n_01, rand_n_02, rand_n_03,...) - sorted
# 2 - calculate correlation between first set and all others
# 3 - put data in a table and plot the results
# 4 - make plot off all values (X = n_points, Y = correlation)
# import python libraries
import sys, os, itertools
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import seaborn as sns
import subprocess
from IPython.display import Image # can use this to display GRASS maps
# stats
from statsmodels.graphics.gofplots import qqplot
from scipy.stats import linregress
from scipy.optimize import leastsq
# helper func: delete all *random maps
def clean_rand():
    """Force-remove every GRASS vector map whose name contains 'random'."""
    grass.run_command('g.remove', type='vector',
                      pattern='*random*', flags='f')
# helper func: round to nearest 5
def round5(x):
    """Round x to the nearest multiple of 5, returned as an int."""
    nearest_multiple = round(x / 5.0) * 5.0
    return int(nearest_multiple)
# aux func: sample DEM elevations at random points
# requires the database connection in GRASS to be SQLITE
def sample_dems_mc(dem, n_points, mc_run, ow_vector, ow_what, vmask):
    ''' create random points for selected tile and sample the elevation values
    simplified version of sample_dems tailored for MonteCarlo-like analysis
    dem = raster map (elevation)
    n_points = number of random points
    mc_run = number of times a set of random points will be created
    ow_vector = should vector maps be overwritten?
    ow_what = re-run v.what.rast ?
    vmask = vector mask to restrict points
    note: to keep random points really random and yet ensure reproducibility,
    random seed is set to the value of mc_run * 42'''
    # Align the computational region to the DEM being sampled.
    grass.run_command('g.region', raster=dem, flags='a')
    # random points: name encodes DEM prefix, point count and zero-padded run number
    vector_name = dem.split('_')[0] +'_random_' + str(n_points) + '_' + str(mc_run).zfill(2)
    grass.run_command('v.random', output=vector_name, npoints=n_points, restrict=vmask, seed=mc_run*42, quiet=True, overwrite=ow_vector)
    rand_col = 'rand_' + str(n_points) + '_' + str(mc_run)
    grass.run_command('v.db.addtable', map=vector_name, columns=rand_col+' double precision', quiet=True, overwrite=ow_vector)
    # sample the raster map - forced if overwriting vectors is requested
    if ow_vector or ow_what:
        grass.run_command('v.what.rast', map=vector_name, raster=dem, column=rand_col, quiet=True)
    # export as ascii (x|y|cat|value) and parse the sampled column into python;
    # empty attribute fields (points outside the raster) become None
    xyz = grass.read_command('v.out.ascii', input=vector_name, type='point', format='point', columns=rand_col, overwrite=True)
    elev_list = [float(attr.split('|')[3]) if attr.split('|')[3] != '' else None for attr in xyz.split('\n')[:-1]]
    elev = np.asarray(elev_list, dtype=np.float64)
    return elev
# aux func: fits a 4PL curve to mean of correlation values
# plots and funcs from http://people.duke.edu/~ccc14/pcfb/analysis.html
def logistic4(x, A, B, C, D):
    """Evaluate the four-parameter logistic (4PL) equation at x."""
    scaled = (x / C) ** B
    return D + (A - D) / (1.0 + scaled)
def residuals(p, y, x):
    """Deviation of observed y from the 4PL curve with parameter vector p."""
    A, B, C, D = p
    fitted = logistic4(x, A, B, C, D)
    return y - fitted
def peval(x, p):
    """Evaluate the 4PL curve at x using the current parameter vector p."""
    return logistic4(x, *p)
# matplotlib figures appear inline in the notebook rather than in a new window.
%matplotlib inline
# create GRASS GIS runtime environment
# with this, you can run GRASS without starting a shell/gui session
gisbase = subprocess.check_output(["grass76", "--config", "path"]).strip()
os.environ['GISBASE'] = gisbase
sys.path.append(os.path.join(gisbase, "etc", "python"))
# GRASS GIS imports (possible only after GISBASE is on sys.path above)
import grass.script as grass
import grass.script.setup as gsetup
import grass.script.array as garray
import grass.script.vector as gvect
# set GRASS GIS session data
# I use two systems, so this makes things a bit easier
if sys.platform == "linux" or sys.platform == "linux2":
    rcfile = gsetup.init(gisbase, "/mnt/sda/grassdata/", "utm", "garopaba_22J")
elif sys.platform == "darwin":
    rcfile = gsetup.init(gisbase, "/Volumes/MacintoshHD2/grassdata/", "utm", "garopaba_22J")
# elif platform == "win32":
# Windows...
# grass.message('Current GRASS GIS 7 environment:')
# print grass.gisenv()
# overwrite flag for GRASS modules
ow = True
# Data dir
# use this to set different paths for different systems
if sys.platform == "linux" or sys.platform == "linux2":
    dataDir = '/mnt/sda/Dropbox/USP/projetosPesquisa/LiDAR_terrestre_SfM/_areas_estudo/garopaba/monteCarlo/'
elif sys.platform == "darwin":
    dataDir = '/Volumes/MacintoshHD2/Dropbox/USP/projetosPesquisa/LiDAR_terrestre_SfM/_areas_estudo/garopaba/monteCarlo/'
# dataDir = '_path_to_your_files_'
os.chdir(dataDir)
# os.getcwd()
# names for the DEM raster maps (built from interpolation method and step size)
method='bilinear'
step = 0.4
dem_tls_10cm = 'tls_rinxyz_mean_10cm_' + method + '_step_' + str(step)
dem_sfm_10cm = 'sfm_rinxyz_mean_10cm_' + method + '_step_' + str(step)
diff_sfm_tls_10cm = 'diff_10cm_sfm_tls'
mask_tls_sfm = 'mask_tls_sfm'
n_random = 50 # number of Monte Carlo runs per configuration
npoints_list = [50,100,250,500,1000,2500,5000,10000]
dem_list = [dem_tls_10cm,dem_sfm_10cm]
# run Monte Carlo sampling for every (DEM, point count) pair and save one CSV each;
# each CSV column holds the sorted elevations of one run
for dem,points in itertools.product(dem_list, npoints_list):
    df = pd.DataFrame()
    file_out = dem + '_rand_MC_' + str(points)
    for run in range(n_random):
        col_name = 'rand_' + str(points) + '_' + str(run).zfill(2)
        elev = sample_dems_mc(dem, points, mc_run=run, ow_vector=True, ow_what=True, vmask=mask_tls_sfm)
        df[col_name] = np.sort(elev)
    df.to_csv(path_or_buf=file_out+'.csv', na_rep='NaN')
# reads data from csv files and calculates correlation
dem = dem_tls_10cm
avg_corr = []
df_corr = pd.DataFrame()
for points in npoints_list:
csv_file = dem + '_rand_MC_' + str(points) + '.csv'
#
df = pd.read_csv(csv_file, index_col=0)
# correlation of first column[0] with all the others [1:].
# No need to define column by name
corr = df.corr().iloc[0,1:]
avg_corr.append(corr.mean())
#
# plot correlation values for this set of random points
x_ax = np.empty(n_random -1)
x_ax.fill(points)
plt.plot(x_ax, corr, 'o')
plt.title(dem)
# curve fitting for mean of correlation values
# Initial guess for curve fitting parameters
p0 = [1, 1, 1, 1]
# observations
x = npoints_list
y_meas = avg_corr
# least-squares fitting
plsq = leastsq(residuals, p0, args=(y_meas, x))
equation = 'y = ((A-D)/(1.0+((x/C)**B))) + D'
A = plsq[0][0]
B = plsq[0][1]
C = plsq[0][2]
D = plsq[0][3]
# sequence of values for curve plotting
xx=np.arange(0,10500,25)
# plot fitted curve
plt.plot(xx, peval(xx,plsq[0]), color='0.5', lw=0.9)
plt.plot(x, y_meas, 'o', color='0.5', ms=5)
plt.title(dem)
clean_rand()
# end GRASS GIS session
os.remove(rcfile)
```
| github_jupyter |
## Gene tree estimation error in sliding windows
What size window is too big such that concatenation washes away the differences among genealogies for MSC-based analyses (i.e., ASTRAL, SNAQ).
```
import toytree
import ipcoal
import numpy as np
import ipyrad.analysis as ipa
```
### Set up a phylogenetic model
```
tree = toytree.rtree.unittree(ntips=12, treeheight=12e6, seed=123)
tree.draw(ts='p');
```
### Simulate a chromosome
```
model = ipcoal.Model(
tree=tree,
Ne=1e6,
nsamples=2,
mut=1e-08,
recomb=1e-09,
)
model.sim_loci(nloci=1, nsites=1e5)
```
### Add missing data as spacers between loci and allele dropout
```
# assumed space between RAD tags
SPACER = 5000
CUTLEN = 5
# iterate over each RAD tag
for i in range(0, model.seqs.shape[2], SPACER):
# mask. [0-300=DATA][300-5300=SPACER]
model.seqs[:, :, i+300: i+SPACER] = 9
# allele dropout
cseqs = model.seqs[:, :, i:i+CUTLEN]
aseqs = model.ancestral_seq[0, i:i+CUTLEN]
mask = np.any(cseqs != aseqs, axis=2)[0]
model.seqs[:, mask, i:i+300] = 9
# check that data looks right
model.draw_seqview(0, 250, 350, height=800);
```
### Write data to SEQS HDF5 format
```
model.write_loci_to_hdf5(name="test", outdir="/tmp", diploid=True)
```
### Reformat all genealogies for comparisons with inferred gene trees
The true gene trees will not distinguish among haplotypes, so we will drop one haplotype from each tip, and we will also multiply branch lengths by the mutation rate so that edge lengths are in units of mutations.
```
def convert_genealogy_to_gene_tree(gtree, mu=1e-8):
    """Make a simulated genealogy comparable to an inferred gene tree.

    Scales every edge length into units of expected mutations (dist * mu),
    drops the "-1" haplotype tip of each sample, and strips the trailing
    "-0" from the remaining tip names.

    NOTE(review): presumably toytree's set_node_values/drop_tips return new
    trees rather than mutating `gtree` in place -- confirm with toytree docs.
    """
    # multiply by mutation rate
    gtree = gtree.set_node_values(
        feature="dist",
        values={i: j.dist * mu for (i, j) in gtree.idx_dict.items()},
    )
    # drop the -1 haplotype from each
    gtree = gtree.drop_tips([i for i in gtree.get_tip_labels() if "-1" in i])
    # drop -0 from names of remaining samples (strip the last two characters)
    gtree = gtree.set_node_values(
        feature="name",
        values={i: j.name[:-2] for (i, j) in gtree.idx_dict.items()},
    )
    return gtree
# convert genealogies to be gene-tree-like
model.df.genealogy = [
convert_genealogy_to_gene_tree(toytree.tree(i)).write()
for i in model.df.genealogy
]
```
### Save record of the TRUE genealogy at each position
```
model.df.to_csv("/tmp/test.csv")
```
### Visualize tree variation
```
# show the first few trees
toytree.mtree(model.df.genealogy[:10]).draw(2, 4, height=500);
```
### Infer gene trees in sliding windows along the chromosome
```
# raxml inference in sliding windows
ts = ipa.treeslider("/tmp/test.seqs.hdf5", window_size=5e4, slide_size=5e4)
ts.run(auto=True, force=True)
# inferred tree is unrooted
toytree.tree(ts.tree_table.tree[0]).draw();
```
### Infer a species tree from inferred gene trees
```
# infer sptree from inferred gene trees from windows
ast = ipa.astral(ts.tree_table)
ast.run()
toytree.tree(ast.tree).draw();
```
### Infer a species tree from TRUE gene trees
```
# sample one tree every 5000bp
gtrees = []
# select gtree every SPACER LEN bp (THIS IS THE SIZE OF WINDOWS)
for point in range(0, model.df.end.max(), 5000):
# get first tree with start > point
gtree = model.df.loc[model.df.start >= point, "genealogy"].iloc[0]
gtrees.append(gtree)
import ipyrad.analysis as ipa
ast = ipa.astral(gtrees)
ast.run()
ast.tree.draw();
```
### Measure RF distance between trees
The normalized RF distance. Larger value means trees are more different.
```
# get two toytrees to compare
tree1 = toytree.tree(model.df.genealogy[0])
tree2 = toytree.tree(model.df.genealogy[100])
# calculate normalized RF distance
rf, rfmax, _, _, _, _, _ = tree1.treenode.robinson_foulds(tree2.treenode)
print(rf, rfmax, rf / rfmax)
# unresolved tree example RF calc
unresolved = tree1.collapse_nodes(min_dist=5e6)
rf, rfmax, _, _, _, _, _ = unresolved.treenode.robinson_foulds(tree2.treenode, unrooted_trees=True)
print(rf, rfmax, rf / rfmax)
```
### Visualize gene tree error
Some kind of sliding plot ...
```
chrom ----------------------------------------------------------------
windows --------- ---------- ------------
RAD loc - - - - - -
gt erro --- --- --- ---
# separate figure
windowsize x spptree error (astral)
```
| github_jupyter |
# Configuration
NOTES: The warnings after the import refer to the fact that TensorFlow 2.x versions are built to directly look for a GPU in the system. The warnings can be ignored if you are not going to use the GPU.
```
!source myenv/bin/activate
# samples in 5 seconds of audio, 16 KHz sample rate
LENGTH_CHOSEN = 80000
import os
import librosa
import numpy as np
from tqdm.notebook import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
sns.set_style('whitegrid')
import IPython.display as ipd
import librosa.display
import numpy as np
import pickle
import scipy
import ipywidgets
import math
from time import time
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import KFold, StratifiedKFold
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Conv2D, AveragePooling1D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import regularizers
# from livelossplot import PlotLossesKeras
tf.config.list_physical_devices('GPU')
# import SVM
from sklearn.model_selection import ParameterGrid
from sklearn.svm import SVC
```
# Utils
```
def samples_scaled(df_train, df_val, df_test):
    """Load, cut/pad and min-max scale the train/val/test audio splits.

    The scaler is fitted on the training waveforms only and then applied to
    validation and test. Train labels stay a list; val/test labels become
    numpy arrays (matching the original behaviour).
    """
    def _prepare(df):
        # load -> raw waveforms -> fixed-length waveforms + aligned labels
        loaded = load_files(df)
        return cut_and_pad(extract_samples(loaded), extract_labels(df))

    samples_train, labels_train = _prepare(df_train)
    samples_train, scaler = minmax_scaling_train(samples_train)

    samples_val, labels_val = _prepare(df_val)
    samples_val = scaler.transform(np.array(samples_val))
    labels_val = np.array(labels_val)

    samples_test, labels_test = _prepare(df_test)
    samples_test = scaler.transform(np.array(samples_test))
    labels_test = np.array(labels_test)

    return samples_train, labels_train, samples_val, labels_val, samples_test, labels_test
def samples_scaled_tess(df_train, df_test):
    """TESS variant of samples_scaled: only train/test splits (no validation).

    Fits the min-max scaler on the training waveforms and applies it to test.
    """
    def _prepare(df):
        loaded = load_files(df)
        return cut_and_pad(extract_samples(loaded), extract_labels(df))

    samples_train, labels_train = _prepare(df_train)
    samples_train, scaler = minmax_scaling_train(samples_train)

    samples_test, labels_test = _prepare(df_test)
    samples_test = scaler.transform(np.array(samples_test))
    labels_test = np.array(labels_test)

    return samples_train, labels_train, samples_test, labels_test
def load_files(df):
    """Load every audio file listed in df['path'] at 16 kHz.

    Returns a list of (waveform, sample_rate) tuples as produced by librosa.
    """
    return [librosa.load(p, res_type='kaiser_fast', sr=16000)
            for p in tqdm(df['path'])]
def extract_samples(X):
    """Return just the waveform (first element) of each (waveform, sr) pair."""
    return [pair[0] for pair in X]
def extract_labels(df):
    """Return a copy of the emotion_label column so later edits don't touch df."""
    return df['emotion_label'].copy()
def compute_lengths(samples):
    """Length (in samples) of every waveform."""
    return list(map(len, samples))
def check_outliers(lengths):
    """Print how many lengths exceed 300000 samples; return those strictly below.

    Note: values exactly equal to 300000 are neither counted nor returned,
    matching the original strict comparisons.
    """
    arr = np.array(lengths)
    print((arr > 300000).sum())
    return arr[arr < 300000]
def compute_mean_length(lengths):
    """Mean waveform length of a numpy array of lengths."""
    average = lengths.mean()
    return average
def cut_and_pad(samples, labels, length_chosen = LENGTH_CHOSEN):
    """Force every waveform to exactly `length_chosen` samples.

    Clips of 300000 samples or more are treated as outliers and dropped
    together with their label. Longer-than-target clips are truncated;
    shorter ones are median-padded on both ends.

    Returns (X_new, y_new): equal-length lists of waveforms and labels.
    """
    X_new = []
    y_new = []
    count = 0  # positional index into `labels`; advanced on EVERY sample,
               # including dropped outliers, to stay aligned with `samples`
    for ind,i in enumerate(samples):
        if i.shape[0] < 300000:
            if i.shape[0] > length_chosen:
                # too long: keep only the first length_chosen samples
                new = i[:length_chosen]
                X_new.append(new)
            elif i.shape[0] < length_chosen:
                # too short: pad both ends; ceil() can overshoot the target
                # by one sample (fixed up in the second loop below)
                new = np.pad(i,math.ceil((length_chosen-i.shape[0])/2), mode='median')
                X_new.append(new)
            else:
                X_new.append(i)
            y_new.append(labels[count])
        count+=1
    # trim the single extra sample produced when the padding total was odd;
    # NOTE(review): 80001 hard-codes LENGTH_CHOSEN + 1 -- assumes the default
    # length_chosen of 80000 is in use; confirm before changing the default
    c = 0
    for el in X_new:
        if len(el) == 80001:
            X_new[c] = el[:-1]
        c+=1
    return X_new, y_new
'''
def compute_mfccs(samples, n_mfcc):
mfccs = []
for i in tqdm(samples):
mfcc = librosa.feature.mfcc(y=i, sr=16000, n_mfcc=n_mfcc)
mfcc = mfcc.T
mfcc = np.array(mfcc)
mfccs.append(mfcc[:, 1:]) # get rid of the first component
mfccs = np.array(mfccs)
return mfccs
'''
def compute_mfccs(samples, n_mfcc):
    """Compute one time-averaged MFCC vector per waveform.

    For each 16 kHz waveform, extracts `n_mfcc` MFCCs, drops the first
    coefficient (overall energy) and averages the rest over all frames,
    yielding a single (n_mfcc - 1,) vector per clip.

    Returns a (n_clips, n_mfcc - 1) numpy array.
    """
    mfccs = []
    for i in tqdm(samples):
        mfcc = librosa.feature.mfcc(y=i, sr=16000, n_mfcc=n_mfcc)
        mfcc = mfcc.T  # -> (frames, n_mfcc)
        mfcc = np.array(mfcc)
        #mfccs.append(mfcc[:, 1:]) # get rid of the first component
        mfccs.append(np.mean(mfcc[:, 1:], axis = 0))  # mean over frames
    mfccs = np.array(mfccs)
    return mfccs
def compute_energy(samples):
    """Frame-wise RMS energy for each waveform.

    Returns a list with one (n_frames, 1) numpy array per input waveform.
    """
    energy_per_sample = []
    for wav in tqdm(samples):
        # Bug fix: librosa >= 0.10 made the signal keyword-only, so the old
        # positional call rms(wav) raises TypeError. The `y=` keyword is also
        # accepted by older librosa versions, keeping this backward compatible.
        energy = librosa.feature.rms(y=wav)
        energy = np.array(energy.T)
        energy_per_sample.append(energy)
    return energy_per_sample
def feature_extractor(df_train, df_val, df_test, n_mfcc):
    """Full pipeline: scaled waveforms -> mean MFCC vectors for each split."""
    prepared = samples_scaled(df_train, df_val, df_test)
    samples_train, labels_train, samples_val, labels_val, samples_test, labels_test = prepared
    mfccs_train = compute_mfccs(samples_train, n_mfcc=n_mfcc)
    mfccs_val = compute_mfccs(samples_val, n_mfcc=n_mfcc)
    mfccs_test = compute_mfccs(samples_test, n_mfcc=n_mfcc)
    return mfccs_train, labels_train, mfccs_val, labels_val, mfccs_test, labels_test
def feature_extractor_tess(df_train, df_test, n_mfcc):
    """TESS feature pipeline: scaled waveforms -> mean MFCC vectors.

    There is no validation split for TESS, so only train/test are produced.

    Bug fix: the original called `samples_scaled` (which requires three
    dataframes) instead of `samples_scaled_tess`, and returned the undefined
    names `mfccs_val`/`labels_val`, so it always raised at runtime.
    """
    samples_train, labels_train, samples_test, labels_test = samples_scaled_tess(df_train, df_test)
    mfccs_train = compute_mfccs(samples_train, n_mfcc=n_mfcc)
    mfccs_test = compute_mfccs(samples_test, n_mfcc=n_mfcc)
    return mfccs_train, labels_train, mfccs_test, labels_test
def encode_labels(labels_train, labels_val, labels_test):
    """Binary-encode emotions: 1 = fear/disgust/sadness/angry, 0 = the rest.

    NOTE: train uses Series.replace (unmapped labels pass through unchanged)
    while val/test use Series.map (unmapped labels become NaN) -- this
    asymmetry is preserved from the original implementation.
    """
    emotion_enc = {'fear': 1, 'disgust': 1, 'neutral': 0, 'calm': 0,
                   'happy': 0, 'sadness': 1, 'surprise': 0, 'angry': 1}
    y_train = pd.Series(labels_train).replace(emotion_enc)
    y_val = pd.Series(labels_val).map(emotion_enc)
    y_test = pd.Series(labels_test).map(emotion_enc)
    return y_train, y_val, y_test
def encode_labels_tess(labels_train, labels_test):
    """Binary-encode TESS emotions: 1 = fear/disgust/sadness/angry, 0 = rest.

    NOTE: train uses Series.replace (unmapped labels pass through) while test
    uses Series.map (unmapped labels become NaN), as in the original.
    """
    emotion_enc = {'fear': 1, 'disgust': 1, 'neutral': 0, 'calm': 0,
                   'happy': 0, 'sadness': 1, 'surprise': 0, 'angry': 1}
    y_train = pd.Series(labels_train).replace(emotion_enc)
    y_test = pd.Series(labels_test).map(emotion_enc)
    return y_train, y_test
def standard_scaling(X_train, X_val, X_test):
    """Fit a StandardScaler on the flattened train features, apply to all splits."""
    def _flat(a):
        # collapse everything except the feature axis for per-feature scaling
        return a.reshape(-1, a.shape[-1])

    scaler = StandardScaler()
    X_train = scaler.fit_transform(_flat(X_train)).reshape(X_train.shape)
    X_test = scaler.transform(_flat(X_test)).reshape(X_test.shape)
    X_val = scaler.transform(_flat(X_val)).reshape(X_val.shape)
    return X_train, X_val, X_test
def minmax_scaling_train(data):
    """Fit a MinMaxScaler to [-1, 1] on the data; return (scaled, scaler)."""
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaled = scaler.fit_transform(data)
    return scaled, scaler
def standard_scaling_tess(X_train, X_test):
    """Fit a StandardScaler on the flattened train features, apply to test."""
    def _flat(a):
        return a.reshape(-1, a.shape[-1])

    scaler = StandardScaler()
    X_train = scaler.fit_transform(_flat(X_train)).reshape(X_train.shape)
    X_test = scaler.transform(_flat(X_test)).reshape(X_test.shape)
    return X_train, X_test
```
# Compute dataframes for datasets and split in Train, Val, Test
```
main_path = '/media/helemanc/OS/Users/i2CAT/Desktop/Datasets SER/'
TESS = os.path.join(main_path, "tess/TESS Toronto emotional speech set data/")
RAV = os.path.join(main_path, "ravdess-emotional-speech-audio/audio_speech_actors_01-24")
SAVEE = os.path.join(main_path, "savee/ALL/")
CREMA = os.path.join(main_path, "creamd/AudioWAV/")
```
## RAVDESS
```
lst = []
emotion = []
voc_channel = []
full_path = []
modality = []
intensity = []
actors = []
phrase =[]
for root, dirs, files in tqdm(os.walk(RAV)):
for file in files:
try:
#Load librosa array, obtain mfcss, store the file and the mfcss information in a new array
# X, sample_rate = librosa.load(os.path.join(root,file), res_type='kaiser_fast')
# mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)
# The instruction below converts the labels (from 1 to 8) to a series from 0 to 7
# This is because our predictor needs to start from 0 otherwise it will try to predict also 0.
modal = int(file[1:2])
vchan = int(file[4:5])
lab = int(file[7:8])
ints = int(file[10:11])
phr = int(file[13:14])
act = int(file[18:20])
# arr = mfccs, lab
# lst.append(arr)
modality.append(modal)
voc_channel.append(vchan)
emotion.append(lab) #only labels
intensity.append(ints)
phrase.append(phr)
actors.append(act)
full_path.append((root, file)) # only files
# If the file is not valid, skip it
except ValueError:
continue
# 01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised
# merge neutral and calm
emotions_list = ['neutral', 'neutral', 'happy', 'sadness', 'angry', 'fear', 'disgust', 'surprise']
emotion_dict = {em[0]+1:em[1] for em in enumerate(emotions_list)}
df = pd.DataFrame([emotion, voc_channel, modality, intensity, actors, actors,phrase, full_path]).T
df.columns = ['emotion', 'voc_channel', 'modality', 'intensity', 'actors', 'gender', 'phrase', 'path']
df['emotion'] = df['emotion'].map(emotion_dict)
df['voc_channel'] = df['voc_channel'].map({1: 'speech', 2:'song'})
df['modality'] = df['modality'].map({1: 'full AV', 2:'video only', 3:'audio only'})
df['intensity'] = df['intensity'].map({1: 'normal', 2:'strong'})
df['actors'] = df['actors']
df['gender'] = df['actors'].apply(lambda x: 'female' if x%2 == 0 else 'male')
df['phrase'] = df['phrase'].map({1: 'Kids are talking by the door', 2:'Dogs are sitting by the door'})
df['path'] = df['path'].apply(lambda x: x[0] + '/' + x[1])
# remove files with noise to apply the same noise to all files for data augmentation
df = df[~df.path.str.contains('noise')]
df.head()
# only speech
RAV_df = df
RAV_df = RAV_df.loc[RAV_df.voc_channel == 'speech']
RAV_df.insert(0, "emotion_label", RAV_df.emotion, True)
RAV_df = RAV_df.drop(['emotion', 'voc_channel', 'modality', 'intensity', 'phrase'], 1)
RAV_df
RAV_train = []
RAV_val = []
RAV_test = []
for index, row in RAV_df.iterrows():
if row['actors'] in range(1,21):
RAV_train.append(row)
elif row['actors'] in range(21,23):
RAV_val.append(row)
elif row['actors'] in range(23,25):
RAV_test.append(row)
len(RAV_train), len(RAV_val), len(RAV_test)
RAV_train = pd.DataFrame(RAV_train)
RAV_val = pd.DataFrame(RAV_val)
RAV_test = pd.DataFrame(RAV_test)
RAV_train = RAV_train.drop(['actors'], 1)
RAV_val = RAV_val.drop(['actors'], 1)
RAV_test = RAV_test.drop(['actors'], 1)
RAV_train.reset_index(drop=True, inplace = True)
RAV_val.reset_index(drop=True, inplace = True)
RAV_test.reset_index(drop=True, inplace = True )
```
## SAVEE
```
# Get the data location for SAVEE
dir_list = os.listdir(SAVEE)
# parse the filename to get the emotions
emotion=[]
path = []
actors = []
gender = []
for i in dir_list:
actors.append(i[:2])
if i[-8:-6]=='_a':
emotion.append('angry')
gender.append('male')
elif i[-8:-6]=='_d':
emotion.append('disgust')
gender.append('male')
elif i[-8:-6]=='_f':
emotion.append('fear')
gender.append('male')
elif i[-8:-6]=='_h':
emotion.append('happy')
gender.append('male')
elif i[-8:-6]=='_n':
emotion.append('neutral')
gender.append('male')
elif i[-8:-6]=='sa':
emotion.append('sadness')
gender.append('male')
elif i[-8:-6]=='su':
emotion.append('surprise')
gender.append('male')
else:
emotion.append('Unknown')
path.append(SAVEE + i)
# Now check out the label count distribution
SAVEE_df = pd.DataFrame(emotion, columns = ['emotion_label'])
SAVEE_df = pd.concat([SAVEE_df,
pd.DataFrame(actors, columns = ['actors']),
pd.DataFrame(gender, columns = ['gender']),
pd.DataFrame(path, columns = ['path'])], axis = 1)
SAVEE_df.emotion_label.value_counts()
SAVEE_df.head()
SAVEE_train = []
SAVEE_val = []
SAVEE_test = []
#DC, JE, JK, KL
for index, row in SAVEE_df.iterrows():
if row['actors'] == 'DC' or row ['actors'] == 'JE':
SAVEE_train.append(row)
elif row['actors'] == 'JK':
SAVEE_val.append(row)
else:
SAVEE_test.append(row)
len(SAVEE_train), len(SAVEE_val), len(SAVEE_test)
SAVEE_train = pd.DataFrame(SAVEE_train)
SAVEE_val = pd.DataFrame(SAVEE_val)
SAVEE_test = pd.DataFrame(SAVEE_test)
SAVEE_train = SAVEE_train.drop(['actors'], 1)
SAVEE_val = SAVEE_val.drop(['actors'], 1)
SAVEE_test = SAVEE_test.drop(['actors'], 1)
SAVEE_train = SAVEE_train.reset_index(drop=True)
SAVEE_val = SAVEE_val.reset_index(drop=True)
SAVEE_test = SAVEE_test.reset_index(drop=True)
```
## TESS
```
dir_list = os.listdir(TESS)
dir_list.sort()
dir_list
path = []
emotion = []
gender = []
actors = []
# Map each TESS sub-directory to its emotion label. Using a lookup table
# instead of a long elif chain also fixes a copy/paste bug in the original:
# files in 'YAF_happy' were labelled 'angry'.
tess_emotion_map = {
    'OAF_angry': 'angry', 'YAF_angry': 'angry',
    'OAF_disgust': 'disgust', 'YAF_disgust': 'disgust',
    'OAF_Fear': 'fear', 'YAF_fear': 'fear',
    'OAF_happy': 'happy', 'YAF_happy': 'happy',  # was mislabelled 'angry'
    'OAF_neutral': 'neutral', 'YAF_neutral': 'neutral',
    'OAF_Pleasant_surprise': 'surprise', 'YAF_pleasant_surprised': 'surprise',
    'OAF_Sad': 'sadness', 'YAF_sad': 'sadness',
}
for i in dir_list:
    fname = os.listdir(TESS + i)
    for f in fname:
        if i in tess_emotion_map:
            emotion.append(tess_emotion_map[i])
            gender.append('female')  # all TESS speakers are female
            actors.append(i.split('_')[0])  # 'OAF' (older) or 'YAF' (younger)
        else:
            # Unknown folder: emotion and path are recorded but gender/actors
            # are not -- matching the original behaviour (lists can go out of
            # sync here; the later pd.concat pads the short columns with NaN).
            emotion.append('Unknown')
        path.append(TESS + i + "/" + f)
TESS_df = pd.DataFrame(emotion, columns = ['emotion_label'])
TESS_df = pd.concat([TESS_df, pd.DataFrame(gender, columns = ['gender']),
pd.DataFrame(actors, columns= ['actors']),
pd.DataFrame(path, columns = ['path'])],axis=1)
TESS_df.emotion_label.value_counts()
TESS_df= TESS_df[~TESS_df.path.str.contains('noise')]
TESS_train = []
TESS_test = []
for index, row in TESS_df.iterrows():
if row['actors'] == 'YAF':
TESS_train.append(row)
else:
TESS_test.append(row)
len(TESS_train), len(TESS_test)
TESS_train = pd.DataFrame(TESS_train)
TESS_test = pd.DataFrame(TESS_test)
TESS_train = TESS_train.reset_index(drop=True)
TESS_test = TESS_test.reset_index(drop=True)
```
## CREMA-D
```
males = [1,
5,
11,
14,
15,
16,
17,
19,
22,
23,
26,
27,
31,
32,
33,
34,
35,
36,
38,
39,
41,
42,
44,
45,
48,
50,
51,
57,
59,
62,
64,
65,
66,
67,
68,
69,
70,
71,
77,
80,
81,
83,
85,
86,
87,
88,
90]
females = [ 2,
3,
4,
6,
7,
8,
9,
10,
12,
13,
18,
20,
21,
24,
25,
28,
29,
30,
37,
40,
43,
46,
47,
49,
52,
53,
54,
55,
56,
58,
60,
61,
63,
72,
73,
74,
75,
76,
78,
79,
82,
84,
89,
91]
crema_directory_list = os.listdir(CREMA)
file_emotion = []
file_path = []
actors = []
gender = []
for file in crema_directory_list:
# storing file emotions
part=file.split('_')
# use only high intensity files
if "HI" in part[3] :
actor = part[0][2:]
actors.append(actor)
if int(actor) in males:
gender.append('male')
else:
gender.append('female')
# storing file paths
file_path.append(CREMA + file)
if part[2] == 'SAD':
file_emotion.append('sadness')
elif part[2] == 'ANG':
file_emotion.append('angry')
elif part[2] == 'DIS':
file_emotion.append('disgust')
elif part[2] == 'FEA':
file_emotion.append('fear')
elif part[2] == 'HAP':
file_emotion.append('happy')
elif part[2] == 'NEU':
file_emotion.append('neutral')
else:
file_emotion.append('Unknown')
# dataframe for emotion of files
emotion_df = pd.DataFrame(file_emotion, columns=['emotion_label'])
# dataframe for path of files.
path_df = pd.DataFrame(file_path, columns=['path'])
actors_df = pd.DataFrame(actors, columns=['actors'])
gender_df = pd.DataFrame(gender, columns=['gender'])
Crema_df = pd.concat([emotion_df, actors_df, gender_df, path_df], axis=1)
Crema_df.head()
Crema_df.shape
actor_files = {}
for index, row in Crema_df.iterrows():
actor = row['actors']
if actor not in actor_files.keys():
actor_files[actor] = 1
else:
actor_files[actor]+=1
actor_files
count_males = 0
count_females = 0
male_list = []
for index, row in Crema_df.iterrows():
gender = row['gender']
actor = row['actors']
if gender == 'male':
count_males +=1
if actor not in male_list:
male_list.append(actor)
else:
count_females +=1
count_males, count_females
```
Since there are more males than females we will remove randomly 3 male actors (since there are exactly 5 audio files per actor)
```
import random
random.seed(42)
males_to_remove = random.sample(male_list, 3)
males_to_remove
new_df = []
for index, row in Crema_df.iterrows():
if row['actors'] not in males_to_remove:
new_df.append(row)
CREMA_df = pd.DataFrame(new_df)
for index, row in CREMA_df.iterrows():
if row['actors'] == '17':
print("Elements not removed")
count_males = 0
count_females = 0
male_list = []
female_list = []
for index, row in CREMA_df.iterrows():
gender = row['gender']
actor = row['actors']
if gender == 'male':
count_males +=1
if actor not in male_list:
male_list.append(actor)
else:
count_females +=1
if actor not in female_list:
female_list.append(actor)
count_males, count_females
len(female_list)
len(male_list)
CREMA_train = []
CREMA_val = []
CREMA_test = []
females_train = random.sample(female_list, 32)
males_train = random.sample(male_list, 32)
# remove the elements assigned to train
for element in females_train:
if element in female_list:
female_list.remove(element)
for element in males_train:
if element in male_list:
male_list.remove(element)
females_val = random.sample(female_list, 6)
males_val = random.sample(male_list, 6)
# remove the elements assigned to val
for element in females_val:
if element in female_list:
female_list.remove(element)
for element in males_val:
if element in male_list:
male_list.remove(element)
females_test = random.sample(female_list, 6)
males_test = random.sample(male_list, 6)
females_train, males_train, females_val, males_val, females_test, males_test
train = females_train + males_train
val = females_val + males_val
test = females_test + males_test
for index, row in CREMA_df.iterrows():
gender = row['gender']
actor = row['actors']
if actor in train:
CREMA_train.append(row)
elif actor in val:
CREMA_val.append(row)
else:
CREMA_test.append(row)
CREMA_train = pd.DataFrame(CREMA_train)
CREMA_val = pd.DataFrame(CREMA_val)
CREMA_test = pd.DataFrame(CREMA_test)
CREMA_train.shape, CREMA_val.shape, CREMA_test.shape
CREMA_train = CREMA_train.reset_index(drop=True)
CREMA_val = CREMA_val.reset_index(drop = True)
```
# Random Search parameters
```
param_grid_clf = {'C': [0.1,1, 10, 100], 'kernel': ['rbf', 'linear']}
svc = SVC()
```
# Experiment 1.1 : RAVDESS
```
df_train = RAV_train
df_val = RAV_val
df_test = RAV_test
df_train.reset_index(drop = True, inplace = True)
df_val.reset_index(drop = True, inplace = True)
df_test.reset_index(drop = True, inplace = True)
```
## Feature Extraction
```
X_train, y_train, X_val, y_val, X_test, y_test = feature_extractor(df_train, df_val, df_test, 26)
y_train, y_val, y_test = encode_labels(y_train, y_val, y_test)
np.size(y_val)
X_train, X_val, X_test = standard_scaling(X_train, X_val, X_test)
X_train.shape
len(X_train[0])
y_train.shape
```
## Shuffle training data
```
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
```
## Hyperparameter optimization
```
# classweight
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)
class_weights = {l:c for l,c in zip(np.unique(y_train), class_weights)}
rand_search = RandomizedSearchCV(estimator=svc,
param_distributions=param_grid_clf,
n_jobs = -1,
cv=KFold(3))
print("Performing Randomized Search...")
t0 = time()
rand_search.fit(X_train, y_train)
print("\nDone in %0.3fs" % (time()-t0))
print()
best_params = rand_search.best_params_
best_clf = rand_search.best_estimator_
best_params
best_clf = SVC(C=best_params.get('C'), kernel=best_params.get('kernel'), class_weight=class_weights)
best_clf.fit(X_train, y_train)
```
## Testing
```
print("Testing...")
pred = best_clf.predict(X_test)
accuracy = best_clf.score(X_test, y_test)
print("Accuracy: %s" %str(accuracy))
```
| github_jupyter |
You are going to read some data, take a look at it, smooth it, and think about
whether the objects you've found are real.
I've provided three python files:
- detection.py Some code to detect objects
- imageProc.py Some image processing code to get you started
- utils.py Convenience functions for a Data object, I/O, and image display
There are also some data files. These started out as fits (as read with pyfits.py, not provided) but
I saved them as numpy ".npy" files (to be read with numpy.load).
```
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
%matplotlib inline
# notebook
import detection
import imageProc
import utils
```
Let's take a look at some data.
Rather than asking you to install a display tool such as ds9, ginga, firefly, or aladin I've provided you with a lightweight image display tool, utils.mtv()
The coloured overlay shows the mask bits that tells you which pixels are bad (its visibility is controlled
by the alpha parameter). The stretch is controlled by "b" (it's roughly the transition from a linear to a
logarithmic stretch).
You can print the value of pixel (x, y) with
print da.image[y, x]
(note the order of the indices). Here x and y can be scalars or numpy arrays
```
da = utils.Data()
da.read()
utils.mtv(da, b=10, alpha=0.8)
xlim, ylim = (80, 400), (100, 400)
plt.xlim(xlim); plt.ylim(ylim)
plt.show()
```
We can show the same data without marking the bad pixels -- you'll see that I fixed them. Magic
```
utils.mtv(da, b=10, alpha=0.0, fig=2)
plt.xlim(xlim); plt.ylim(ylim)
plt.show()
```
If you want to look at the raw data, you can:
```
raw = utils.Data()
raw.read(readRaw=True)
utils.mtv(raw, b=10, alpha=0.3)
plt.xlim(740, 810); plt.ylim(230, 290)
plt.show()
```
Next write a function to smooth the data with a Gaussian filter. You can do the work with ```convolveWithGaussian``` in the next cell.
_N.b._ You can make a copy of a ```Data``` object using ```da.copy()```
```
def gaussian2D(beta):
    """Normalized 2-D Gaussian kernel with width beta, truncated near 3*beta."""
    half = int(3*abs(beta) + 1)
    yy, xx = np.mgrid[-half:half + 1, -half:half + 1]
    kernel = np.exp(-(xx**2 + yy**2)/(2*beta**2))
    return kernel/kernel.sum()

def convolveWithGaussian(image, beta):
    """Smooth `image` with a 2-D Gaussian of width beta (same-size output)."""
    return scipy.signal.convolve(image, gaussian2D(beta), mode='same')
# %%timeit -n 1 -r 1
sda = da.copy()
beta = 2.5
sda.image = convolveWithGaussian(sda.image, beta)
utils.mtv(sda.image)
```
We can also calculate the filter's _effective area_ (and confirm or deny that I did my Gaussian integrals correctly in the lecture)
```
phi = gaussian2D(beta)
n_eff = 1/np.sum(phi**2)
print "n_eff = %.3f (analytically: %.3f)" % (n_eff, 4*pi*beta**2)
```
That convolution seemed slow to me. Go back to the cell, uncomment the ```%%timeit``` line, and run it again. How long did it take?
OK, take a look at the next cell and see if you can see what I did -- it's more python (and loops too) so it must be slower. Is it?
```
def convolveWithGaussian(image, beta):
    """Separable Gaussian smoothing: convolve each row, then each column,
    with a 1-D Gaussian of width `beta` (much faster than a full 2-D kernel).

    NOTE: smoothing is done in place -- `image` is modified and also returned.
    """
    def gaussian1D(beta):
        # normalized 1-D Gaussian, truncated near 3*beta
        size = int(3*abs(beta) + 1)
        x = np.arange(-size, size+1)
        phi = np.exp(-x**2/(2*beta**2))
        phi /= phi.sum()
        return phi
    # Bug fix: the original re-assigned `beta = 2.5` here, shadowing the
    # parameter so the caller's beta was silently ignored.
    phi = gaussian1D(beta)
    for y in range(0, image.shape[0]):
        image[y] = scipy.signal.convolve(image[y], phi, mode='same')
    for x in range(0, image.shape[1]):
        image[:, x] = scipy.signal.convolve(image[:, x], phi, mode='same')
    return image
```
Now let's look for objects. We know how to do this; we smooth the image with the PSF then look for peaks. It's not totally trivial to find all the sets of connected pixels, so I provided you with a function ```detection.findObjects``` to do the work
```
nsigma = 3.5
threshold = nsigma*sqrt(np.median(sda.variance)/n_eff)
footprints = detection.findObjects(sda.image, threshold, grow=3)
print "I found %d objects" % (len(footprints))
```
We can look at all our objects by looping over the footprints:
```
nShow = 10
for foot in footprints.values()[0:nShow]:
print "(%5d, %5d) %3d" % (foot.centroid[0], foot.centroid[1], foot.npix)
if len(footprints) > nShow:
print "..."
```
Or by setting a mask plane -- this way we'll be able to see all the pixels
```
sda.clearMaskPlane("DETECTED")
detection.setMaskFromFootprints(sda, footprints, "DETECTED")
utils.mtv(sda)
plt.xlim(xlim); plt.ylim(ylim)
plt.show()
```
We can do the same thing for the original (unsmoothed) image
```
da.clearMaskPlane("DETECTED")
detection.setMaskFromFootprints(da, footprints, "DETECTED")
utils.mtv(da, alpha=0.3)
plt.xlim(xlim); plt.ylim(ylim)
plt.show()
```
I lied to you; or at least I didn't tell you everything. That 'data' was actually the output from the LSST simulator, which means that I know the Truth; more accurately, I know the location of every photon that arrived from the sources without any sky background. The pixels are 0.2 arcseconds on a side.
Let's overlay the detection mask on the truth.
```
t = utils.Data(image=da.truth, mask=sda.mask)
utils.mtv(t, I0=1, b=0.01, alpha=0.6)
plt.xlim(xlim); plt.ylim(ylim)
plt.show()
```
If you look at the direct image you can see things that seem real when you compare with the truth, for example the object at (156, 205). So should we be using a lower threshold? What happens if you choose a smaller value?
OK, so that picked up the object I pointed out, but it picked up some noise too. How many false objects would I expect to detect per square degree? Naïvely we'd expect each PSF-sized patch to be independent, so we can try using the tails of a Gaussian to estimate how many objects we'd detect per square degree. If I take the area of a PSF to be 0.5 arcsec^2, I have
```
import scipy.special
pixelSize = 0.200
nPerPsf = 0.5*scipy.special.erfc(nsigma/sqrt(2))
nPerDeg = nPerPsf*3600**2/0.5
print "False positives per degree: %d In data: %d" % (
nPerDeg, nPerDeg/(3600/(da.image.shape[0]*pixelSize))**2)
```
Nick Kaiser has done the theory more carefully (it was easy for him; he used results from a classic paper, Bardeen et al., of which he was a co-author). The answer is that the number of peaks per-arcsecond is
$$
\frac{1}{2^{5/2} \pi^{3/2} \beta^2} n_\sigma e^{-n_\sigma^2/2}
$$
I'm not as clever as Nick, but I do have access to a computer...
```
# %%timeit -n 1 -r 1
# Monte-Carlo check of Kaiser's analytic false-positive rate: build a
# pure-noise image, smooth it with the PSF, and count detected "objects".
detection = reload(detection)
ndeg = 1.0/2.0 # Size of image we'll simulate (in degrees)
size = int(3600*ndeg/pixelSize) # Size of image we'll simulate (in pixels)
im = np.zeros((size, size))
# nsigma: detection threshold in units of the post-smoothing noise r.m.s.;
# Poisson switches between Poisson (sky-like) and Gaussian noise.
nsigma, Poisson= 5, False
np.random.seed(667)  # fixed seed so the counts tabulated below are reproducible
sigma = 10  # per-pixel noise r.m.s.
if Poisson:
    mu = sigma**2  # Poisson mean chosen so the variance equals sigma**2
    im += np.random.poisson(lam=mu, size=size*size).reshape(size, size) - mu
else:
    im += np.random.normal(scale=sigma, size=size*size).reshape(size, size)
sim = convolveWithGaussian(im, beta)  # smooth with the PSF before detecting
n_eff = 4*pi*beta**2 # Effective area of PSF
# Smoothing reduces the per-pixel noise by sqrt(n_eff), so scale the
# threshold accordingly (matches the threshold used on the real data above).
threshold = nsigma*sigma/sqrt(n_eff)
footprints = detection.findObjects(sim, threshold, grow=0)
# Report: noise model, threshold, measured peaks per square degree, and the
# analytic prediction quoted in the text for the same threshold.
print "%s %g %d %.1f" % (("Poisson" if Poisson else "Gaussian"), nsigma, \
                         len(footprints)/ndeg**2, \
                         3600**2*1/(2**2.5*pi**1.5*(beta*pixelSize)**2)*nsigma*exp(-nsigma**2/2))
if not False:  # flip to False to skip displaying the detections
    tmp = utils.Data(sim)
    tmp.clearMaskPlane("DETECTED")
    detection.setMaskFromFootprints(tmp, footprints, "DETECTED")
    utils.mtv(tmp, alpha=1)
```
By patiently running the previous cell several times, I arrived at:
nsigma Gaussian Poisson Gaussian Prediction
3 52288 55008 54848.1
3.5 11888 13296 12600
5 0 48 18.6
5.5 2 16 2.4
6 0 0 0.2
| github_jupyter |
# Building and Visualizing word frequencies
In this lab, we will focus on the `build_freqs()` helper function and visualizing a dataset fed into it. In our goal of tweet sentiment analysis, this function will build a dictionary where we can lookup how many times a word appears in the lists of positive or negative tweets. This will be very helpful when extracting the features of the dataset in the week's programming assignment. Let's see how this function is implemented under the hood in this notebook.
## Setup
Let's import the required libraries for this lab:
```
import nltk # Python library for NLP
from nltk.corpus import twitter_samples # sample Twitter dataset from NLTK
import matplotlib.pyplot as plt # visualization library
import numpy as np # library for scientific computing and matrix operations
```
#### Import some helper functions that we provided in the utils.py file:
* `process_tweet()`: Cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
* `build_freqs()`: This counts how often a word in the 'corpus' (the entire set of tweets) was associated with a positive label `1` or a negative label `0`. It then builds the `freqs` dictionary, where each key is a `(word,label)` tuple, and the value is the count of its frequency within the corpus of tweets.
```
# download the stopwords for the process_tweet function
nltk.download('stopwords')
# import our convenience functions
from utils import process_tweet, build_freqs
```
## Load the NLTK sample dataset
As in the previous lab, we will be using the [Twitter dataset from NLTK](http://www.nltk.org/howto/twitter.html#Using-a-Tweet-Corpus).
```
# select the lists of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
# concatenate the lists, 1st part is the positive tweets followed by the negative
tweets = all_positive_tweets + all_negative_tweets
# let's see how many tweets we have
print("Number of tweets: ", len(tweets))
```
Next, we will build a labels array that matches the sentiments of our tweets. This data type works pretty much like a regular list but is optimized for computations and manipulation. The `labels` array will be composed of 10000 elements. The first 5000 will be filled with `1` labels denoting positive sentiments, and the next 5000 will be `0` labels denoting the opposite. We can do this easily with a series of operations provided by the `numpy` library:
* `np.ones()` - create an array of 1's
* `np.zeros()` - create an array of 0's
* `np.append()` - concatenate arrays
```
# make a numpy array representing labels of the tweets
labels = np.append(np.ones((len(all_positive_tweets))), np.zeros((len(all_negative_tweets))))
```
## Dictionaries
In Python, a dictionary is a mutable and indexed collection. It stores items as key-value pairs and uses [hash tables](https://en.wikipedia.org/wiki/Hash_table) underneath to allow practically constant time lookups. In NLP, dictionaries are essential because they enable fast retrieval of items or containment checks even with thousands of entries in the collection.
### Definition
A dictionary in Python is declared using curly brackets. Look at the next example:
```
dictionary = {'key1': 1, 'key2': 2}
print(dictionary)
```
The previous line defines a dictionary with two entries. Keys and values can be almost any type ([with a few restrictions on keys](https://docs.python.org/3/tutorial/datastructures.html#dictionaries)), and in this case, we used strings. We can also use floats, integers, tuples, etc.
### Adding or editing entries
New entries can be inserted into dictionaries using square brackets. If the dictionary already contains the specified key, its value is overwritten.
```
# Add a new entry
dictionary['key3'] = -5
# Overwrite the value of key1
dictionary['key1'] = 0
print(dictionary)
```
### Accessing values and lookup keys
Performing dictionary lookups and retrieval are common tasks in NLP. There are two ways to do this:
* Using square bracket notation: This form is allowed if the lookup key is in the dictionary. It produces an error otherwise.
* Using the [get()](https://docs.python.org/3/library/stdtypes.html#dict.get) method: This allows us to set a default value if the dictionary key does not exist.
Let us see these in action:
```
# Square bracket lookup when the key exist
print(dictionary['key2'])
```
However, if the key is missing, the operation produces an error
```
# The output of this line is intended to produce a KeyError
print(dictionary['key8'])
```
When using a square bracket lookup, it is common to use an if-else block to check for containment first (with the keyword `in`) before getting the item. On the other hand, you can use the `.get()` method if you want to set a default value when the key is not found. Let's compare these in the cells below:
```
# This prints a value
if 'key1' in dictionary:
print("item found: ", dictionary['key1'])
else:
print('key1 is not defined')
# Same as what you get with get
print("item found: ", dictionary.get('key1', -1))
# This prints a message because the key is not found
if 'key7' in dictionary:
print(dictionary['key7'])
else:
print('key does not exist!')
# This prints -1 because the key is not found and we set the default to -1
print(dictionary.get('key7', -1))
```
## Word frequency dictionary
Now that we know the building blocks, let's finally take a look at the **build_freqs()** function in **utils.py**. This is the function that creates the dictionary containing the word counts from each corpus.
```python
def build_freqs(tweets, ys):
    """Build a word-frequency dictionary from labeled tweets.

    Input:
        tweets: a list of tweets
        ys: an m x 1 array with the sentiment label of each tweet
            (either 0 or 1)
    Output:
        freqs: a dictionary mapping each (word, sentiment) pair to its
        frequency
    """
    # np.squeeze collapses the m x 1 array so each element pairs with one
    # tweet in the zip below; a plain Python list passes through unchanged.
    labels = np.squeeze(ys).tolist()

    # Tally every processed word under its tweet's sentiment label.
    freqs = {}
    for label, tweet in zip(labels, tweets):
        for token in process_tweet(tweet):
            key = (token, label)
            if key not in freqs:
                freqs[key] = 1
            else:
                freqs[key] += 1
    return freqs
```
You can also do the for loop like this to make it a bit more compact:
```python
for y, tweet in zip(yslist, tweets):
for word in process_tweet(tweet):
pair = (word, y)
freqs[pair] = freqs.get(pair, 0) + 1
```
As shown above, each key is a 2-element tuple containing a `(word, y)` pair. The `word` is an element in a processed tweet while `y` is an integer representing the corpus: `1` for the positive tweets and `0` for the negative tweets. The value associated with this key is the number of times that word appears in the specified corpus. For example:
```
# "followfriday" appears 25 times in the positive tweets
('followfriday', 1.0): 25
# "shame" appears 19 times in the negative tweets
('shame', 0.0): 19
```
Now, it is time to use the dictionary returned by the `build_freqs()` function. First, let us feed our `tweets` and `labels` lists then print a basic report:
```
# create frequency dictionary
freqs = build_freqs(tweets, labels)
# check data type
print(f'type(freqs) = {type(freqs)}')
# check length of the dictionary
print(f'len(freqs) = {len(freqs)}')
```
Now print the frequency of each word depending on its class.
```
print(freqs)
```
Unfortunately, this does not help much to understand the data. It would be better to visualize this output to gain better insights.
## Table of word counts
We will select a set of words that we would like to visualize. It is better to store this temporary information in a table that is very easy to use later.
```
# select some words to appear in the report. we will assume that each word is unique (i.e. no duplicates)
keys = ['happi', 'merri', 'nice', 'good', 'bad', 'sad', 'mad', 'best', 'pretti',
'❤', ':)', ':(', '😒', '😬', '😄', '😍', '♛',
'song', 'idea', 'power', 'play', 'magnific']
# list representing our table of word counts.
# each element consist of a sublist with this pattern: [<word>, <positive_count>, <negative_count>]
data = []
# loop through our selected words
for word in keys:
# initialize positive and negative counts
pos = 0
neg = 0
# retrieve number of positive counts
if (word, 1) in freqs:
pos = freqs[(word, 1)]
# retrieve number of negative counts
if (word, 0) in freqs:
neg = freqs[(word, 0)]
# append the word counts to the table
data.append([word, pos, neg])
data
```
We can then use a scatter plot to inspect this table visually. Instead of plotting the raw counts, we will plot it in the logarithmic scale to take into account the wide discrepancies between the raw counts (e.g. `:)` has 3568 counts in the positive while only 2 in the negative). The red line marks the boundary between positive and negative areas. Words close to the red line can be classified as neutral.
```
fig, ax = plt.subplots(figsize = (8, 8))
# convert positive raw counts to logarithmic scale. we add 1 to avoid log(0)
x = np.log([x[1] + 1 for x in data])
# do the same for the negative counts
y = np.log([x[2] + 1 for x in data])
# Plot a dot for each pair of words
ax.scatter(x, y)
# assign axis labels
plt.xlabel("Log Positive count")
plt.ylabel("Log Negative count")
# Add the word as the label at the same position as you added the points just before
for i in range(0, len(data)):
ax.annotate(data[i][0], (x[i], y[i]), fontsize=12)
ax.plot([0, 9], [0, 9], color = 'red') # Plot the red line that divides the 2 areas.
plt.show()
```
This chart is straightforward to interpret. It shows that emoticons `:)` and `:(` are very important for sentiment analysis. Thus, we should not let preprocessing steps get rid of these symbols!
Furthermore, what is the meaning of the crown symbol? It seems to be very negative!
### That's all for this lab! We've seen how to build a word frequency dictionary and this will come in handy when extracting the features of a list of tweets. Next up, we will be reviewing Logistic Regression. Keep it up!
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_3_transfer_cv.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 9: Regularization: L1, L2 and Dropout**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 9 Material
* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)
* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)
* **Part 9.3: Transfer Learning for Computer Vision and Keras** [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)
* Part 9.4: Transfer Learning for Languages and Keras [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)
* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
# Start CoLab
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
```
# Part 9.3: Transfer Learning for Computer Vision and Keras
In this part, we will use transfer learning to create a simple neural network that can recognize dog breeds. To keep the example simple, we will only train for a handful of breeds. You can find a much more advanced form of this model at the [Microsoft Dog Breed Image Search](https://www.bing.com/visualsearch/Microsoft/WhatDog).
To keep computation times to a minimum, we will make use of the MobileNet included in Keras. We will begin by loading the entire MobileNet and seeing how well it classifies with several test images. MobileNet can classify 1,000 different images. We will ultimately extend it to classify image types that are not in its dataset, in this example, three dog breeds. However, we begin by classifying image types among those in MobileNet's original training set. Even though our test images were not in its training set, the loaded neural network should classify them. Just as before, the program instantiates two environments: one for training and one for evaluation.
```
import pandas as pd
import numpy as np
import os
import tensorflow.keras
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,GlobalAveragePooling2D
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
```
We begin by downloading weights for a MobileNet trained for the imagenet dataset, which will take some time to download the first time you train the network.
```
model = MobileNet(weights='imagenet',include_top=True)
```
The loaded network is a Keras neural network. However, this is a neural network that a third party engineered on advanced hardware. Merely looking at the structure of an advanced state-of-the-art neural network can be educational.
```
model.summary()
```
Just examining the above structure, several clues to neural network architecture become evident.
Notice how some of the layers have zeros in their number of parameters. The summary always displays hyperparameters as zero. The neural network fitting process does not change hyperparameters. The other layers have learnable parameters that are adjusted as training occurs. The layer types are all hyperparameters; Keras will not change a convolution layer to a max-pooling layer. However, the layers that have parameters are trained/adjusted by the training algorithm. Most of the parameters seen above are the weights of the neural network.
The programmer can configure some of the parameters as non-trainable. The training algorithm cannot adjust these. When we later use transfer learning with this model, we will strip off the final layers that classify 1000 items and replace them with our three dog breed classification layer. Only our new layers will be trainable; we will mark the existing layers as non-trainable.
This neural network makes extensive use of the Relu activation function. Relu is a common choice for activation functions. Also, the neural network makes use of batch normalization and dropout. Many deep neural networks are pyramid-shaped, and this is the case for this one. This neural network uses an expanding pyramid shape as you can see the neuron/filter counts grow from 32 to 64 to 128 to 256 to 512 and max out at 1,024.
We will now use the MobileNet to classify several image URL's below. You can add additional URL's of your own to see how well the MobileNet can classify.
```
%matplotlib inline
from PIL import Image, ImageFile
from matplotlib.pyplot import imshow
import requests
import numpy as np
from io import BytesIO
from IPython.display import display, HTML
from tensorflow.keras.applications.mobilenet import decode_predictions
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
IMAGE_CHANNELS = 3
images = [
"https://cdn.shopify.com/s/files/1/0712/4751/products/SMA-01_2000x.jpg?v=1537468751",
"https://farm2.static.flickr.com/1394/967537586_87b1358ad3.jpg",
"https://sites.wustl.edu/jeffheaton/files/2016/07/jheaton_wustl1-262izm5-458x458.jpg",
"https://1.bp.blogspot.com/-0vGbvWUrSAA/XP-OurPTA4I/AAAAAAAAgtg/"\
"TGx6YiGBEGIMjnViDjvVnYzYp__DJ6I-gCLcBGAs/s320/B%252Bt%2525aMbJQkm3Z50rqput%252BA.jpg"
]
def make_square(img):
    """Center-crop a PIL image to a square whose side is the shorter dimension.

    PIL crop boxes are (left, upper, right, lower). The original code cropped
    along the wrong axis (e.g. box ``(pad, 0, cols, cols)`` for a tall image),
    which returned a non-square result; this version trims the longer axis.
    """
    cols, rows = img.size  # PIL reports size as (width, height)
    if rows > cols:
        # Taller than wide: trim equal bands from top and bottom.
        pad = (rows - cols) // 2
        img = img.crop((0, pad, cols, pad + cols))
    else:
        # Wider than tall: trim equal bands from left and right.
        pad = (cols - rows) // 2
        img = img.crop((pad, 0, pad + rows, rows))
    return img
# Fetch each test image over HTTP, run it through MobileNet's expected
# preprocessing, and display the network's top-5 ImageNet predictions.
for url in images:
    ImageFile.LOAD_TRUNCATED_IMAGES = False  # fail loudly on truncated downloads
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    img.load()
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
    img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.LANCZOS)
    x = image.img_to_array(img)    # HWC float array
    x = np.expand_dims(x, axis=0)  # add batch dimension -> NHWC
    x = preprocess_input(x)        # MobileNet-specific input scaling
    pred = model.predict(x)
    display("___________________________________________________________________________________________")
    display(img)
    print(np.argmax(pred, axis=1))  # index of the most probable class
    lst = decode_predictions(pred, top=5)
    for itm in lst[0]:
        print(itm)
```
Overall, the neural network is doing quite well. However, it does not classify me as a "person"; instead, it classifies me as a "suit." Similarly, it incorrectly classifies my English Bulldog Hickory as a "pug". My dog's mistaken classification might be forgivable, as I am only providing a closeup of his face.
For many applications, MobileNet might be entirely acceptable as an image classifier. However, if you need to classify very specialized images not in the 1,000 image types supported by imagenet, it is necessary to use transfer learning.
### Transfer
It is possible to create your image classification network from scratch. This endeavor would take considerable time and resources. Just creating a dog breed classifier would require many pictures of dogs, labeled by breed. Using a pretrained neural network, you are tapping into knowledge already built into the lower layers of the neural network. The transferred layers likely already have some notion of eyes, ears, feet, and fur. These lower-level concepts help to train the neural network to identify dog breeds.
Next, we reload the MobileNet; however, we set the *include_top* parameter to *False*. This setting instructs Keras not to load the final classification layers. This setting is the common mode of operation for transfer learning. We display a summary to see that the top classification layer is now missing.
```
base_model=MobileNet(weights='imagenet',include_top=False)
#imports the mobilenet model and discards the last 1000 neuron layer.
base_model.summary()
```
We will add new top layers to the neural network. Our final SoftMax layer includes support for 3 classes.
```
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x)
x=Dense(1024,activation='relu')(x)
preds=Dense(3,activation='softmax')(x)
```
Next, we mark the original MobileNet layers as non-trainable and our new layers as trainable.
```
model=Model(inputs=base_model.input,outputs=preds)
for layer in model.layers[:20]:
layer.trainable=False
for layer in model.layers[20:]:
layer.trainable=True
```
To train the neural network, we must create a directory structure to hold the images. The Keras command **flow_from_directory** performs this for us. It requires that a folder be laid out as follows. Each class is a folder that contains images of that class. We can also specify a target size; in this case the original MobileNet size of 224x224 is desired.
```
if COLAB:
PATH = ""
else:
PATH = "./data/transfer"
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator=train_datagen.flow_from_directory('c:\\jth\\data\\trans',
target_size=(224,224),
color_mode='rgb',
batch_size=1,
class_mode='categorical',
shuffle=True)
```
We are now ready to compile and fit the neural network. Notice we are using **fit_generator** rather than **fit**. This choice is because we are using the convenient **ImageDataGenerator**.
```
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
epochs=50)
```
We are now ready to see how our new model can predict dog breeds. The URLs in the code below provide several example dogs. Feel free to add your own.
```
%matplotlib inline
from PIL import Image, ImageFile
from matplotlib.pyplot import imshow
import requests
import numpy as np
from io import BytesIO
from IPython.display import display, HTML
from tensorflow.keras.applications.mobilenet import decode_predictions
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
IMAGE_CHANNELS = 3
images = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/a/a8/02.Owczarek_niemiecki_u%C5%BCytkowy_kr%C3%B3tkow%C5%82osy_suka.jpg/2560px-02.Owczarek_niemiecki_u%C5%BCytkowy_kr%C3%B3tkow%C5%82osy_suka.jpg",
"https://upload.wikimedia.org/wikipedia/commons/5/51/DSHwiki.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/e5/Axel%2C_the_English_Bulldog.jpg/440px-Axel%2C_the_English_Bulldog.jpg",
"https://1.bp.blogspot.com/-0vGbvWUrSAA/XP-OurPTA4I/AAAAAAAAgtg/TGx6YiGBEGIMjnViDjvVnYzYp__DJ6I-gCLcBGAs/s320/B%252Bt%2525aMbJQkm3Z50rqput%252BA.jpg",
"https://thehappypuppysite.com/wp-content/uploads/2017/12/poodle1.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/4/40/Pudel_Grossschwarz.jpg/440px-Pudel_Grossschwarz.jpg"
]
def make_square(img):
    """Center-crop a PIL image to a square whose side is the shorter dimension.

    PIL crop boxes are (left, upper, right, lower). The original code cropped
    along the wrong axis (e.g. box ``(pad, 0, cols, cols)`` for a tall image),
    which returned a non-square result; this version trims the longer axis.
    """
    cols, rows = img.size  # PIL reports size as (width, height)
    if rows > cols:
        # Taller than wide: trim equal bands from top and bottom.
        pad = (rows - cols) // 2
        img = img.crop((0, pad, cols, pad + cols))
    else:
        # Wider than tall: trim equal bands from left and right.
        pad = (cols - rows) // 2
        img = img.crop((pad, 0, pad + rows, rows))
    return img
# Fetch each dog photo over HTTP, run it through the same MobileNet
# preprocessing used for training, and print the predicted breed index.
for url in images:
    ImageFile.LOAD_TRUNCATED_IMAGES = False  # fail loudly on truncated downloads
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    img.load()
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
    img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.LANCZOS)
    x = image.img_to_array(img)    # HWC float array
    x = np.expand_dims(x, axis=0)  # add batch dimension -> NHWC
    x = preprocess_input(x)        # MobileNet-specific input scaling
    pred = model.predict(x)
    display("___________________________________________________________________________________________")
    display(img)
    print(np.argmax(pred, axis=1))  # index of the predicted breed class
```
| github_jupyter |
# Prepare_email tutorial
The Melusine **prepare_email subpackage** provides preprocessing subpackages all providing several preprocessing functions to be applied in a particular order.
**The functions are all designed to be applied on rows of dataframes. They should be wrapped in a TransformerScheduler object before being integrated into an execution Pipeline.**
```
from melusine.data.data_loader import load_email_data
df_emails = load_email_data()
```
## Manage_transfer_reply subpackage
The manage_transfer_reply subpackage provides several functions to preprocess the transfers and replies contained in the body of an email. All the functions are designed to be applied on rows of dataframes.
### check_mail_begin_by_transfer function
**check_mail_begin_by_transfer** returns True if the *body* starts with given regex 'begin_transfer', False if not.
```
from melusine.prepare_email.manage_transfer_reply import check_mail_begin_by_transfer
row_with_direct_transfer = df_emails.loc[0,:]
print(row_with_direct_transfer.body)
print('\n')
print(check_mail_begin_by_transfer(row_with_direct_transfer))
row_without_direct_transfer = df_emails.loc[5,:]
print(row_without_direct_transfer.body)
print('\n')
print(check_mail_begin_by_transfer(row_without_direct_transfer))
```
### update_info_for_transfer_mail function
**update_info_for_transfer_mail** extracts and updates information from emails if the value of the **is_begin_by_transfer** column returned by the **check_mail_begin_by_transfer** function is True.
The informations are extracted from the **body** column to update the following columns :
- **header**
- **from**
- **to**
- **date**
The **body** column will then be cleaned of the updated informations.
```
from melusine.prepare_email.manage_transfer_reply import update_info_for_transfer_mail
row_with_direct_transfer = df_emails.loc[0,:].copy()
print(row_with_direct_transfer.body)
print('\n')
print(row_with_direct_transfer.header)
print(row_with_direct_transfer.date)
print(row_with_direct_transfer['from'])
print(row_with_direct_transfer.to)
row_with_direct_transfer['is_begin_by_transfer'] = check_mail_begin_by_transfer(row_with_direct_transfer)
row_with_direct_transfer['is_begin_by_transfer']
row_with_direct_transfer = update_info_for_transfer_mail(row_with_direct_transfer)
print(row_with_direct_transfer.body)
print('\n')
print(row_with_direct_transfer.header)
print(row_with_direct_transfer.date)
print(row_with_direct_transfer['from'])
print(row_with_direct_transfer.to)
```
### add_boolean_answer function
**add_boolean_answer function** returns True if the **header** column indicates that the email is a reply, False if not.
```
from melusine.prepare_email.manage_transfer_reply import add_boolean_answer
row_with_answer = df_emails.loc[2,:]
row_with_answer.header
add_boolean_answer(row_with_answer)
df_emails['is_answer'] = df_emails.apply(add_boolean_answer, axis=1)
df_emails[['is_answer','body']]
```
### add_boolean_transfer function
**add_boolean_transfer function** returns True if the **header** column indicates that the email is a transfer, False if not.
```
from melusine.prepare_email.manage_transfer_reply import add_boolean_transfer
row_with_transfer = df_emails.loc[6,:]
row_with_transfer.header
add_boolean_transfer(row_with_transfer)
```
### manage_transfer_reply transformer
The functions of the manage_transfer_reply subpackage can be wrapped in a TransformerScheduler object to be applied directly on a dataframe :
```
from melusine.utils.transformer_scheduler import TransformerScheduler
ManageTransferReplyTransformer = TransformerScheduler(
functions_scheduler=[
(check_mail_begin_by_transfer, None, ['is_begin_by_transfer']),
(update_info_for_transfer_mail, None, None),
(add_boolean_answer, None, ['is_answer']),
(add_boolean_transfer, None, ['is_transfer'])
]
)
df_emails = load_email_data()
df_emails.columns
df_emails = ManageTransferReplyTransformer.fit_transform(df_emails)
df_emails.columns
```
## Build_historic and mail_segmenting subpackage
### build_historic function
The **build_historic subpackage** provides a **build_historic function** to segment the messages components of the **body column** of an email.
It returns a list of dictionaries, one dictionary per message in inverse chronological order (the first dictionary corresponds to the last message while the last dictionary corresponds to the first message). Each dictionary has two keys:
{'text': raw text without metadata,
'meta': metadata
}.
**build_historic** is designed to be applied on rows of dataframes.
```
row = df_emails.loc[2,:].copy()
print(row.body)
from melusine.prepare_email.build_historic import build_historic
row['structured_historic'] = build_historic(row)
```
There is no metadata for the last message.
```
print('Text of last message :')
print(row['structured_historic'][0]['text'])
print('\n')
print('Metadata of last message :')
print(row['structured_historic'][0]['meta'])
print('\n')
print('Text of first message :')
print(row['structured_historic'][1]['text'])
print('\n')
print('Metadata of first message :')
print(row['structured_historic'][1]['meta'])
```
### structure_email function
The **mail_segmenting subpackage** provides a **structure_email function** to further segment the messages components of the **structured_historic column** which should contain the result of the **build_historic function** previously applied:
- meta : the date, from and to components of the metadata will be segmented.
- text : the header will be segmented from the text. The different parts of the text will be segmented and tagged (hello, body, greetings, signature, footer..)
It returns a list of dictionaries, one dictionary per message in inverse chronological order (the first dictionary corresponds to the last message while the last dictionary corresponds to the first message). Each dictionary has two keys:
{'structured_text': {'header': header of the message,
'text': [{'part': first part of the message,
'tags': tag of the first part of the message
},
...,
{'part': last part of the message,
'tags': tag of the last part of the message
}
]
}
'meta': {'date': date of the message,
'from': email address of the author of the message,
'to': email address of the recipient of the message
}
}
**structure_email** is designed to be applied on rows of dataframes.
```
from melusine.prepare_email.mail_segmenting import structure_email
row['structured_body'] = structure_email(row)
print('Date of last message :')
print(row['structured_body'][0]['meta']['date'])
print('From of last message :')
print(row['structured_body'][0]['meta']['from'])
print('To of last message :')
print(row['structured_body'][0]['meta']['to'])
print('\n')
print('Header of last message :')
print(row['structured_body'][0]['structured_text']['header'])
print('\n')
print('Segmented text of last message :')
for parts in row['structured_body'][0]['structured_text']['text']:
print(parts['tags']+" :")
print(parts['part'])
print('\n')
print('----------------------------------------------------------------------')
print('\n')
print('Date of first message :')
print(row['structured_body'][1]['meta']['date'])
print('From of first message :')
print(row['structured_body'][1]['meta']['from'])
print('To of first message :')
print(row['structured_body'][1]['meta']['to'])
print('\n')
print('Header of first message :')
print(row['structured_body'][1]['structured_text']['header'])
print('\n')
print('Segmented text of first message :')
for parts in row['structured_body'][1]['structured_text']['text']:
print(parts['tags']+" :")
print(parts['part'])
```
### segmenting transformer
The **build_historic** and **structure_email** functions can be wrapped in a TransformerScheduler object to be applied directly on a dataframe :
```
SegmentingTransformer = TransformerScheduler(
functions_scheduler=[
(build_historic, None, ['structured_historic']),
(structure_email, None, ['structured_body'])
]
)
df_emails.columns
df_emails = SegmentingTransformer.fit_transform(df_emails)
df_emails.columns
```
## Body_header_extraction and cleaning subpackages
### extract_last_body function
The **body_header_extraction subpackage** provides a **extract_last_body function** to extract from the **structured_body column** of a row the parts of the last message that have been tagged as *body*.
```
print(row.body)
for parts in row.structured_body[0]['structured_text']['text']:
if parts['tags']=='BODY':
print(parts['part'])
from melusine.prepare_email.body_header_extraction import extract_last_body
row['last_body'] = extract_last_body(row)
print(row['last_body'])
```
### cleaning subpackage
The **cleaning subpackage** provides two functions to be applied on rows of dataframes :
- **clean_body :** to clean the *last_body* column.
- **clean_header :** to clean the *header* column.
```
from melusine.prepare_email.cleaning import clean_body
clean_body(row)
from melusine.prepare_email.cleaning import clean_header
clean_header(row)
```
### LastBodyHeaderCleaning transformer
The **extract_last_body**, **clean_body**, **clean_header** functions can be wrapped in a TransformerScheduler object to be applied directly on a dataframe :
```
LastBodyHeaderCleaning = TransformerScheduler(
functions_scheduler=[
(extract_last_body, None, ['last_body']),
(clean_body, None, ['clean_body']),
(clean_header, None, ['clean_header'])
]
)
df_emails.columns
df_emails = LastBodyHeaderCleaning.fit_transform(df_emails)
df_emails.columns
```
## Full prepare_email pipeline
```
# Build the full melusine prepare_email pipeline from three
# TransformerScheduler stages and apply it to a fresh copy of the data.
# NOTE(review): each scheduled entry appears to be
# (function, args, output column name(s)) -- confirm the middle element's
# semantics against the melusine TransformerScheduler docs.
df_emails = load_email_data()
df_emails.columns
from sklearn.pipeline import Pipeline
# Transformer object to manage transfers and replies
ManageTransferReply = TransformerScheduler(
functions_scheduler=[
(check_mail_begin_by_transfer, None, ['is_begin_by_transfer']),
(update_info_for_transfer_mail, None, None),
(add_boolean_answer, None, ['is_answer']),
(add_boolean_transfer, None, ['is_transfer'])
]
)
# Transformer object to segment the different messages in the email, parse their metadata and
# tag the different part of the messages
Segmenting = TransformerScheduler(
functions_scheduler=[
(build_historic, None, ['structured_historic']),
(structure_email, None, ['structured_body'])
]
)
# Transformer object to extract the body of the last message of the email and clean it as
# well as the header
LastBodyHeaderCleaning = TransformerScheduler(
functions_scheduler=[
(extract_last_body, None, ['last_body']),
(clean_body, None, ['clean_body']),
(clean_header, None, ['clean_header'])
]
)
# Full prepare_email pipeline
# The stages run in order: transfer/reply handling, then segmentation,
# then last-body extraction and cleaning.
PrepareEmailPipeline = Pipeline([
('ManageTransferReply', ManageTransferReply),
('Segmenting', Segmenting),
('LastBodyHeaderCleaning', LastBodyHeaderCleaning)
])
df_emails = PrepareEmailPipeline.fit_transform(df_emails)
df_emails.head()
```
| github_jupyter |
---
description: Create your Continual Learning Benchmark and Start Prototyping
---
# Benchmarks
Welcome to the "_benchmarks_" tutorial of the "_From Zero to Hero_" series. In this part we will present the functionalities offered by the `Benchmarks` module.
```
!pip install git+https://github.com/ContinualAI/avalanche.git
```
## 🎯 Nomenclature
First off, let's clarify a bit the nomenclature we are going to use, introducing the following terms: `Datasets`, `Scenarios`, `Benchmarks` and `Generators`.
* By `Dataset` we mean a **collection of examples** that can be used for training or testing purposes but not already organized to be processed as a stream of batches or tasks. Since Avalanche is based on Pytorch, our Datasets are [torch.utils.Datasets](https://pytorch.org/docs/stable/_modules/torch/utils/data/dataset.html#Dataset) objects.
* By `Scenario` we mean a **particular setting**, i.e. specificities about the continual stream of data, a continual learning algorithm will face.
* By `Benchmark` we mean a well-defined and carefully thought **combination of a scenario with one or multiple datasets** that we can use to asses our continual learning algorithms.
* By `Generator` we mean a function that **given a specific scenario and a dataset can generate a Benchmark**.
## 📚 The Benchmarks Module
The `benchmarks` module offers 3 types of utils:
* **Datasets**: all the Pytorch datasets plus additional ones prepared by our community and particularly interesting for continual learning.
* **Classic Benchmarks**: classic benchmarks used in CL literature ready to be used with great flexibility.
* **Benchmarks Generators**: a set of functions you can use to create your own benchmark starting from any kind of data and scenario. In particular, we distinguish two types of generators: `Specific` and `Generic`. The first ones let you create a benchmark based on a well-defined scenario and Pytorch dataset\(s\); the latter, instead, are more generic and flexible, both in terms of scenario definition and in terms of the type of data they can manage.
* _Specific_:
* **nc\_benchmark**: given one or multiple datasets it creates a benchmark instance based on scenarios where _New Classes_ \(NC\) are encountered over time. Notable scenarios that can be created using this utility include _Class-Incremental_, _Task-Incremental_ and _Task-Agnostic_ scenarios.
* **ni\_benchmark**: it creates a benchmark instance based on scenarios where _New Instances_ \(NI\), i.e. new examples of the same classes are encountered over time. Notable scenarios that can be created using this utility include _Domain-Incremental_ scenarios.
* _Generic_:
* **filelist\_benchmark**: It creates a benchmark instance given a list of filelists.
* **paths\_benchmark**: It creates a benchmark instance given a list of file paths and class labels.
* **tensors\_benchmark**: It creates a benchmark instance given a list of tensors.
* **dataset\_benchmark**: It creates a benchmark instance given a list of pytorch datasets.
But let's see how we can use this module in practice!
## 🖼️ Datasets
Let's start with the `Datasets`. As we previously hinted, in _Avalanche_ you'll find all the standard Pytorch Datasets available in the torchvision package as well as a few others that are useful for continual learning but not already officially available within the Pytorch ecosystem.
```
import torch
import torchvision
from avalanche.benchmarks.datasets import MNIST, FashionMNIST, KMNIST, EMNIST, \
QMNIST, FakeData, CocoCaptions, CocoDetection, LSUN, ImageNet, CIFAR10, \
CIFAR100, STL10, SVHN, PhotoTour, SBU, Flickr8k, Flickr30k, VOCDetection, \
VOCSegmentation, Cityscapes, SBDataset, USPS, Kinetics400, HMDB51, UCF101, \
CelebA, CORe50, TinyImagenet, CUB200, OpenLORIS
# As we would simply do with any Pytorch dataset we can create the train and
# test sets from it. We could use any of the above imported Datasets, but let's
# just try to use the standard MNIST.
train_MNIST = MNIST(
    './data/mnist', train=True, download=True,
    transform=torchvision.transforms.ToTensor()
)
test_MNIST = MNIST(
    './data/mnist', train=False, download=True,
    transform=torchvision.transforms.ToTensor()
)

# Given these two sets we can simply iterate them to get the examples one by one
for i, example in enumerate(train_MNIST):
    pass
# BUG FIX: enumerate is zero-based, so after the loop `i` holds the index of
# the last example; the count of examples processed is `i + 1` (the later
# experience cell in this notebook already prints `i + 1`).
print("Num. examples processed: {}".format(i + 1))

# or use a Pytorch DataLoader
train_loader = torch.utils.data.DataLoader(
    train_MNIST, batch_size=32, shuffle=True
)
for i, (x, y) in enumerate(train_loader):
    pass
# BUG FIX: same off-by-one as above.
print("Num. mini-batch processed: {}".format(i + 1))
```
Of course also the basic utilities `ImageFolder` and `DatasetFolder` can be used. These are two classes that you can use to create a Pytorch Dataset directly from your files \(following a particular structure\). You can read more about these in the Pytorch official documentation [here](https://pytorch.org/docs/stable/torchvision/datasets.html#imagefolder).
We also provide additional `FilelistDataset` and `AvalancheDataset` classes. The former to construct a dataset from a filelist [\(caffe style\)](https://ceciliavision.wordpress.com/2016/03/08/caffedata-layer/) pointing to files anywhere on the disk. The latter to augment the basic Pytorch Dataset functionalities with an extension to better deal with a stack of transformations to be used during train and test.
```
from avalanche.benchmarks.utils import ImageFolder, DatasetFolder, FilelistDataset, AvalancheDataset
```
## 🛠️ Benchmarks Basics
The _Avalanche_ benchmarks \(instances of the _Scenario_ class\), contains several attributes that characterize the benchmark. However, the most important ones are the `train` and `test streams`.
In _Avalanche_ we often suppose to have access to these **two parallel stream of data** \(even though some benchmarks may not provide such feature, but contain just a unique test set\).
Each of these `streams` are _iterable_, _indexable_ and _sliceable_ objects that are composed of unique **experiences**. Experiences are batch of data \(or "_tasks_"\) that can be provided with or without a specific task label.
#### Efficiency
It is worth mentioning that all the data belonging to a _stream_ are not loaded into the RAM beforehand. Avalanche actually loads the data when a specific _mini-batches_ are requested at training/test time based on the policy defined by each `Dataset` implementation.
This means that memory requirements are very low, while the speed is guaranteed by a multi-processing data loading system based on the one defined in Pytorch.
#### Scenarios
So, as we have seen, each `scenario` object in _Avalanche_ has several useful attributes that characterizes the benchmark, including the two important `train` and `test streams`. Let's check what you can get from a scenario object more in details:
```
from avalanche.benchmarks.classic import SplitMNIST
split_mnist = SplitMNIST(n_experiences=5, seed=1)
# Original train/test sets
print('--- Original datasets:')
print(split_mnist.original_train_dataset)
print(split_mnist.original_test_dataset)
# A list describing which training patterns are assigned to each experience.
# Patterns are identified by their id w.r.t. the dataset found in the
# original_train_dataset field.
print('--- Train patterns assignment:')
print(split_mnist.train_exps_patterns_assignment)
# A list describing which test patterns are assigned to each experience.
# Patterns are identified by their id w.r.t. the dataset found in the
# original_test_dataset field
print('--- Test patterns assignment:')
print(split_mnist.test_exps_patterns_assignment)
# the task label of each experience.
print('--- Task labels:')
print(split_mnist.task_labels)
# train and test streams
print('--- Streams:')
print(split_mnist.train_stream)
print(split_mnist.test_stream)
# A list that, for each experience (identified by its index/ID),
# stores a set of the (optionally remapped) IDs of classes of patterns
# assigned to that experience.
print('--- Classes in each experience:')
split_mnist.classes_in_experience
```
#### Train and Test Streams
The _train_ and _test streams_ can be used for training and testing purposes, respectively. This is what you can do with these streams:
```
# each stream has a name: "train" or "test"
train_stream = split_mnist.train_stream
print(train_stream.name)
# we have access to the scenario from which the stream was taken
train_stream.scenario
# we can slice and reorder the stream as we like!
substream = train_stream[0]
substream = train_stream[0:2]
substream = train_stream[0,2,1]
len(substream)
```
#### Experiences
Each stream can in turn be treated as an iterator that produces a unique `experience`, containing all the useful data regarding a _batch_ or _task_ in the continual stream our algorithms will face. Check out how can you use these experiences below:
```
# we get the first experience
experience = train_stream[0]
# task label and dataset are the main attributes
t_label = experience.task_label
dataset = experience.dataset
# but you can recover additional info
experience.current_experience
experience.classes_in_this_experience
experience.classes_seen_so_far
experience.previous_classes
experience.future_classes
experience.origin_stream
experience.scenario
# As always, we can iterate over it normally or with a pytorch
# data loader.
# For instance, we can use tqdm to add a progress bar.
from tqdm import tqdm
for i, data in enumerate(tqdm(dataset)):
pass
print("\nNumber of examples:", i + 1)
print("Task Label:", t_label)
```
## 🏛️ Classic Benchmarks
Now that we know how our benchmarks work in general through scenarios, streams and experiences objects, in this section we are going to explore **common benchmarks** already available for you with one line of code yet flexible enough to allow proper tuning based on your needs:
```
from avalanche.benchmarks.classic import CORe50, SplitTinyImageNet, \
SplitCIFAR10, SplitCIFAR100, SplitCIFAR110, SplitMNIST, RotatedMNIST, \
PermutedMNIST, SplitCUB200, SplitImageNet
# creating PermutedMNIST (Task-Incremental)
perm_mnist = PermutedMNIST(
n_experiences=2,
seed=1234,
)
```
Many of the classic benchmarks will download the original datasets they are based on automatically and put it under the `"~/.avalanche/data"` directory.
### How to Use the Benchmarks
Let's see now how we can use the classic benchmark or the ones that you can create through the generators \(see next section\). For example, let's try out the classic `PermutedMNIST` benchmark \(_Task-Incremental_ scenario\).
```
# creating the benchmark instance (scenario object)
perm_mnist = PermutedMNIST(
n_experiences=3,
seed=1234,
)
# recovering the train and test streams
train_stream = perm_mnist.train_stream
test_stream = perm_mnist.test_stream
# iterating over the train stream
for experience in train_stream:
print("Start of task ", experience.task_label)
print('Classes in this task:', experience.classes_in_this_experience)
# The current Pytorch training set can be easily recovered through the
# experience
current_training_set = experience.dataset
# ...as well as the task_label
print('Task {}'.format(experience.task_label))
print('This task contains', len(current_training_set), 'training examples')
# we can recover the corresponding test experience in the test stream
current_test_set = test_stream[experience.current_experience].dataset
print('This task contains', len(current_test_set), 'test examples')
```
## 🐣 Benchmarks Generators
What if we want to create a new benchmark that is not present in the "_Classic_" ones? Well, in that case _Avalanche_ offers a number of utilities that you can use to create your own benchmark with maximum flexibility: the **benchmarks generators**!
### Specific Generators
The _specific_ scenario generators are useful when starting from one or multiple Pytorch datasets you want to create a "**New Instances**" or "**New Classes**" benchmark: i.e. it supports the easy and flexible creation of a _Domain-Incremental_, _Class-Incremental or Task-Incremental_ scenarios among others.
For the **New Classes** scenario you can use the following function:
* `nc_benchmark`
for the **New Instances**:
* `ni_benchmark`
```
from avalanche.benchmarks.generators import nc_benchmark, ni_benchmark
```
Let's start by creating the MNIST dataset object as we would normally do in Pytorch:
```
from torchvision.transforms import Compose, ToTensor, Normalize, RandomCrop
train_transform = Compose([
RandomCrop(28, padding=4),
ToTensor(),
Normalize((0.1307,), (0.3081,))
])
test_transform = Compose([
ToTensor(),
Normalize((0.1307,), (0.3081,))
])
mnist_train = MNIST(
'./data/mnist', train=True, download=True, transform=train_transform
)
mnist_test = MNIST(
'./data/mnist', train=False, download=True, transform=test_transform
)
```
Then we can, for example, create a new benchmark based on MNIST and the classic _Domain-Incremental_ scenario:
```
scenario = ni_benchmark(
mnist_train, mnist_test, n_experiences=10, shuffle=True, seed=1234,
balance_experiences=True
)
train_stream = scenario.train_stream
for experience in train_stream:
t = experience.task_label
exp_id = experience.current_experience
training_dataset = experience.dataset
print('Task {} batch {} -> train'.format(t, exp_id))
print('This batch contains', len(training_dataset), 'patterns')
```
Or, we can create a benchmark based on MNIST and the _Class-Incremental_ \(what's commonly referred to as "_Split-MNIST_" benchmark\):
```
scenario = nc_benchmark(
mnist_train, mnist_test, n_experiences=10, shuffle=True, seed=1234,
task_labels=False
)
train_stream = scenario.train_stream
for experience in train_stream:
t = experience.task_label
exp_id = experience.current_experience
training_dataset = experience.dataset
print('Task {} batch {} -> train'.format(t, exp_id))
print('This batch contains', len(training_dataset), 'patterns')
```
### Generic Generators
Finally, if you cannot create your ideal benchmark since it does not fit well in the aforementioned _new classes_ or _new instances_ scenarios, you can always use our **generic generators**:
* **filelist\_benchmark**
* **paths\_benchmark**
* **dataset\_benchmark**
* **tensors\_benchmark**
```
from avalanche.benchmarks.generators import filelist_benchmark, dataset_benchmark, \
tensors_benchmark, paths_benchmark
```
Let's start with the `filelist_benchmark` utility. This function is particularly useful when it is important to preserve a particular order of the patterns to be processed \(for example if they are frames of a video\), or in general if we have data scattered around our drive and we want to create a sequence of batches/tasks providing only a txt file containing the list of their paths.
For _Avalanche_ we follow the same format of the _Caffe_ filelists \("_path_ _class\_label_"\):
/path/to/a/file.jpg 0
/path/to/another/file.jpg 0
...
/path/to/another/file.jpg M
/path/to/another/file.jpg M
...
/path/to/another/file.jpg N
/path/to/another/file.jpg N
So let's download the classic "_Cats vs Dogs_" dataset as an example:
```
!wget -N --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip
!unzip -q -o cats_and_dogs_filtered.zip
```
You can now see in the `content` directory on colab the images we downloaded. We are now going to create the filelists and then use the `filelist_benchmark` function to create our benchmark:
```
import os

# Build one Caffe-style filelist per experience: "cats" -> task 0 file,
# "dogs" -> task 1 file.
dirpath = "cats_and_dogs_filtered/train"

for filelist, rel_dir, t_label in zip(
        ["train_filelist_00.txt", "train_filelist_01.txt"],
        ["cats", "dogs"],
        [0, 1]):
    # First, obtain the list of files.
    # BUG FIX: the original called os.path.join(dirpath, dir), passing the
    # *builtin* `dir` function instead of the loop variable `rel_dir`, which
    # raises a TypeError at runtime.
    filenames_list = os.listdir(os.path.join(dirpath, rel_dir))

    # Create the text file containing the filelist.
    # Filelists must be Caffe-style, i.e. one entry per line in the format:
    #
    #   relative_path_img1 class_label_first_img
    #   relative_path_img2 class_label_second_img
    #   ...
    #
    # For instance:
    #   cat/cat_0.png 1
    #   dog/dog_54.png 0
    #
    # Paths are relative to a root path
    # (specified when calling filelist_benchmark).
    with open(filelist, "w") as wf:
        for name in filenames_list:
            wf.write(
                "{} {}\n".format(os.path.join(rel_dir, name), t_label)
            )

# Here we create a GenericCLScenario ready to be iterated
generic_scenario = filelist_benchmark(
    dirpath,
    ["train_filelist_00.txt", "train_filelist_01.txt"],
    ["train_filelist_00.txt"],
    task_labels=[0, 0],
    complete_test_set_only=True,
    train_transform=ToTensor(),
    eval_transform=ToTensor()
)
```
In the previous cell we created a benchmark instance starting from file lists. However, `paths_benchmark` is a better choice if you already have the list of paths directly loaded in memory:
```
# Build in-memory lists of (path, class_label) tuples -- one list per
# experience -- instead of writing filelists to disk.
train_experiences = []
for rel_dir, label in zip(["cats", "dogs"], [0, 1]):
    # First, obtain the list of files.
    # BUG FIX: as in the filelist cell, the original passed the builtin
    # `dir` to os.path.join instead of the loop variable `rel_dir`,
    # raising a TypeError.
    filenames_list = os.listdir(os.path.join(dirpath, rel_dir))

    # Don't create a file list: instead, we create a list of
    # paths + class labels.
    experience_paths = []
    for name in filenames_list:
        instance_tuple = (os.path.join(dirpath, rel_dir, name), label)
        experience_paths.append(instance_tuple)
    train_experiences.append(experience_paths)

# Here we create a GenericCLScenario ready to be iterated
generic_scenario = paths_benchmark(
    train_experiences,
    [train_experiences[0]],  # Single test set
    task_labels=[0, 0],
    complete_test_set_only=True,
    train_transform=ToTensor(),
    eval_transform=ToTensor()
)
```
Let us see how we can use the `dataset_benchmark` utility, where we can use several PyTorch datasets as different batches or tasks. This utility expects a list of datasets for the train, test (and other custom) streams. Each dataset will be used to create an experience:
```
train_cifar10 = CIFAR10(
'./data/cifar10', train=True, download=True
)
test_cifar10 = CIFAR10(
'./data/cifar10', train=False, download=True
)
generic_scenario = dataset_benchmark(
[train_MNIST, train_cifar10],
[test_MNIST, test_cifar10]
)
```
Adding task labels can be achieved by wrapping each datasets using `AvalancheDataset`. Apart from task labels, `AvalancheDataset` allows for more control over transformations and offers an ever growing set of utilities (check the documentation for more details).
```
# Alternatively, task labels can also be a list (or tensor)
# containing the task label of each pattern.
# BUG FIX: the original wrapped `train_cifar10`/`test_cifar10` in the
# *_MNIST_task0 variables, so MNIST never actually entered the scenario and
# both "tasks" used CIFAR10. Wrap the MNIST datasets instead, matching the
# variable names and the preceding dataset_benchmark example.
train_MNIST_task0 = AvalancheDataset(train_MNIST, task_labels=0)
test_MNIST_task0 = AvalancheDataset(test_MNIST, task_labels=0)
train_cifar10_task1 = AvalancheDataset(train_cifar10, task_labels=1)
test_cifar10_task1 = AvalancheDataset(test_cifar10, task_labels=1)

scenario_custom_task_labels = dataset_benchmark(
    [train_MNIST_task0, train_cifar10_task1],
    [test_MNIST_task0, test_cifar10_task1]
)

print('Without custom task labels:',
      generic_scenario.train_stream[1].task_label)
print('With custom task labels:',
      scenario_custom_task_labels.train_stream[1].task_label)
```
And finally, the `tensors_benchmark` generator:
```
pattern_shape = (3, 32, 32)
# Definition of training experiences
# Experience 1
experience_1_x = torch.zeros(100, *pattern_shape)
experience_1_y = torch.zeros(100, dtype=torch.long)
# Experience 2
experience_2_x = torch.zeros(80, *pattern_shape)
experience_2_y = torch.ones(80, dtype=torch.long)
# Test experience
# For this example we define a single test experience,
# but "tensors_benchmark" allows you to define even more than one!
test_x = torch.zeros(50, *pattern_shape)
test_y = torch.zeros(50, dtype=torch.long)
generic_scenario = tensors_benchmark(
train_tensors=[(experience_1_x, experience_1_y), (experience_2_x, experience_2_y)],
test_tensors=[(test_x, test_y)],
task_labels=[0, 0], # Task label of each train exp
complete_test_set_only=True
)
```
This completes the "_Benchmark_" tutorial for the "_From Zero to Hero_" series. We hope you enjoyed it!
## 🤝 Run it on Google Colab
You can run _this chapter_ and play with it on Google Colaboratory: [](https://colab.research.google.com/github/ContinualAI/colab/blob/master/notebooks/avalanche/2.-benchmarks.ipynb)
| github_jupyter |
# load library
```
### visualization
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.offline as py
import matplotlib as mat
py.init_notebook_mode(connected=True)
# data analysis and wrangling
import numpy as np
import pandas as pd
#import re
from functools import partial
# other library
import datetime
import calendar
# ignore sys warning
import warnings
import os
import glob
warnings.filterwarnings('ignore')
```
# Cohort analysis
cohort analysis 幫助我們觀察消費者的回購狀況,透過它我們可以迅速發現回購異狀,並以此做更深入的分析,比如比較來源渠道、訂單內容等等。
在這一篇 Jupyter 中,我要示範以 python 畫出cohort table。
## load data from csvs
由於訂單資料很龐大,因此我先透過 API 將訂單資訊載入到本機端(這一步沒有呈現在這邊),然後用 for loop 將每個月的訂單 csv 檔案載入並儲存成一個data frame。
```
path =r'YOUR PATH'
allFiles = glob.glob(path + "/*.csv")
df = pd.DataFrame()
list_ = []
for file_ in allFiles:
df_temp = pd.read_csv(file_,index_col=None, header=0)
list_.append(df_temp)
df = pd.concat(list_)
```
## split cart by different shop
由於訂單資訊並不會分商城儲存,因此我們必須先將不同商城的資訊拆開
```
df = df.loc[df.order_status != 'Cancel']
df_10706 = df.loc[df.shop_id == 10706].reset_index().drop('index', axis=1)
del df
#df_10706.head(3)
```
## TS to TG
由於訂單資訊包含商品資訊,因此會出現單筆訂單多個商品(列)的情況,由於cohort table並不需要商品資訊,因此我們將去除商品資訊與去除重複訂單資訊。
```
df_10706_tg = df_10706[['cart_code', 'member_code', 'order_time', 'order_label',
'payment', 'promotion_discount', 'ecoupon_discount']]
#df_10706_tg.head(3)
df_10706_tg = df_10706_tg.groupby(['cart_code', 'member_code', 'order_time', 'order_label'], as_index=False).sum()
df_10706_tg = df_10706_tg.drop_duplicates()
#df_10706_tg.head(3)
```
下面我們要處理時間資訊,由於原始資料的時間資訊細至毫秒,但是計算cohort table時,我們僅需到月的資訊,因此我們將訂購時間抓至月份。
```
df_10706_tg['order_time'] = df_10706_tg['order_time'].str[:7]
df_10706_tg['order_time'] = pd.to_datetime(df_10706_tg['order_time'], format='%Y-%m-%d')
#df_10706_tg.head(3)
```
## Find first buy day
計算cohort的第一步在於先找出每位消費者第一次購買的月份,如此我們才可以根據消費者的首購月份計算每個 m + n 的回購
```
df_10706_tg = pd.merge(df_10706_tg,
df_10706_tg.groupby('member_code', as_index=False)['order_time'].min(),
'left',
'member_code')
df_10706_tg.columns = ['cart_code', 'member_code', 'order_time', 'order_label', 'payment',
'promotion_discount', 'ecoupon_discount', 'first_buy']
#df_10706_tg.head(3)
```
## define cohort_group & order_period
將訂購日期與首購日期轉換成月份,如此方便我們計算cohort
```
df_10706_tg['cohort_group'] = df_10706_tg.first_buy.astype(str).str[:7]
df_10706_tg['order_period'] = df_10706_tg.order_time.astype(str).str[:7]
#df_10706_tg.head(3)
```
## calculate each cohort_group & order_period metrics
在這邊我們要分別計算每群不同首購月份的族群在不同月份的貢獻,這邊我們會特別計算人數與訂單數
```
# Aggregate per (cohort_group, order_period): number of distinct members and
# distinct carts active in each period.
cohorts = df_10706_tg.groupby(['cohort_group', 'order_period']).agg({'member_code': pd.Series.nunique,
'cart_code': pd.Series.nunique})
# make the column names more meaningful
# NOTE(review): this rename is a no-op -- the aggregated frame's columns are
# 'member_code' and 'cart_code', so the 'UserId'/'OrderId' keys match nothing
# (DataFrame.rename silently ignores missing keys by default). Later cells
# index cohorts['member_code'] / cohorts['cart_code'], so correcting the keys
# here would require updating those cells as well.
cohorts.rename(columns={'UserId': 'TotalUsers',
'OrderId': 'TotalOrders'}, inplace=True)
cohorts.head()
```
## label cohort period
這邊我們要將每個月份依照不同的首購月份給予編號,以便我們方便比較。比如對比11月和12月的回購狀況,10月m+1和11月m+1的比較反而更直覺
```
cohorts['period'] = cohorts.groupby(level=0).cumcount()
cohorts.head()
```
## reindex cohort
這邊我們將以剛重新計算的編號來取代原先的編號
```
cohorts.reset_index(inplace=True)
cohorts.set_index(['cohort_group', 'period'], inplace=True)
cohorts.head()
```
## calculate user retention by cohort
這邊開始我們要根據回購人來計算cohort table,後續我們還會根據訂單數來做一次,詳細說明僅會在這邊呈現。
首先我們先取得首購月份的總人數
```
cohorts_usr_group = cohorts['member_code'].groupby(level=0).first()
cohorts_usr_group.head()
```
再來我們先將 cohorts 以剛得到的總人數來計算各個月分的回購比例
```
cohorts_usr = cohorts['member_code'].unstack(0) #將cohort_group作為欄位
cohorts_usr = cohorts_usr.divide(cohorts_usr_group, axis=1) #計算每個月回購率
cohorts_usr.head()
```
## 畫出不同首購月份的回購線圖
```
cohorts_usr[['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09']].plot(figsize=(20,10))
plt.title('Cohorts: User Retention')
plt.xticks(np.arange(0, 12.1, 1))
plt.xlim(0, 12)
#plt.ylim(0, 0.2)
plt.ylabel('% of Cohort Purchasing')
```
## 呈現 cohort table
```
cohorts_usr.columns = ['1612', '1701', '1702', '1703', '1704', '1705',
'1706', '1707', '1708', '1709', '1710']
sns.set(style='white')
plt.figure(figsize=(20, 12))
plt.title('Cohorts: User Retention', size=24)
mat.rcParams.update({'font.size': 14})
plt.tick_params(labelsize=14)
sns.heatmap(cohorts_usr.T, mask=cohorts_usr.T.isnull(), annot=True, fmt='.2%', cmap='Paired')
```
## compute order retention by cohort
下列展示以訂單數為主要衡量回購基礎的算法,方法與上述相同,在此不贅述
```
cohorts_crt_group = cohorts['cart_code'].groupby(level=0).first()
cohorts_crt_group.head()
cohorts_crt = np.round(cohorts['cart_code'].unstack(0).divide(cohorts_crt_group, axis=1), 4)
cohorts_crt.head()
cohorts_crt.reset_index()
```
## 劃出每個月訂單回購變化
```
cohorts_crt[['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09']].plot(figsize=(20,10))
plt.title('Cohorts: Cart Retention')
plt.xticks(np.arange(0, 12.1, 1))
plt.xlim(0, 12)
#plt.ylim(0, 0.2)
plt.ylabel('% of Cohort Purchasing')
```
## 畫出以訂單為基準的cohort table
```
cohorts_crt.columns = ['1612', '1701', '1702', '1703', '1704', '1705', '1706', '1707', '1708', '1709', '1710']
sns.set(style='white')
plt.figure(figsize=(20, 12))
plt.title('Cohorts: User Retention', size=24)
mat.rcParams.update({'font.size': 14})
plt.tick_params(labelsize=14)
sns.heatmap(cohorts_crt.T, mask=cohorts_crt.T.isnull(), annot=True, fmt='.2%', cmap='Paired')
```
# 總結
Cohort analysis只是幫助你及早發現回購或顧客關係維持出狀況,因此這只是你分析的第一步!後續一般而言我還會進一步仔細研究以下差異,但礙於下列步驟會牽扯太多資訊,因此在此簡單條列給各位:
1. 觀察表現不好月份的購買商品排行,這部分還可以結合商品的回購率來比較
2. 觀察訂單的來源渠道,觀察這些渠道的變化,再進一步尋找是否該渠道的操作不同以往
3. 觀察表現不好的月份的廣告排名,觀察是否是新的廣告形式、族群、內容所導致的結果
4. 若你有做NPS,去觀察是否是服務、商品等原因導致消費者不回購
5. 觀察商城的產品狀況,如主打活動商品維和、庫存狀態和效期
最後希望這些步驟能幫助到需要回購分析的朋友,若要轉載再麻煩標明出處唷!
| github_jupyter |
RIHAD VARIAWA, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING
<h1>Lists</h1>
<li>Sequential, Ordered Collection
<h2>Creating lists</h2>
```
x = [4,2,6,3] #Create a list with values
y = list() # Create an empty list
y = [] #Create an empty list
print(x)
print(y)
```
<h3>Adding items to a list</h3>
```
# Demonstrates the three ways of adding items to a list:
# append (single item), insert (at an index), extend (many items).
x=list()
print(x)
x.append('One') #Adds 'One' to the back of the empty list
print(x)
x.append('Two') #Adds 'Two' to the back of the list ['One']
print(x)
x.insert(0,'Half') #Inserts 'Half' at location 0. Items will shift to make room
print(x)
x=list()
x.extend([1,2,3]) #Unpacks the list and adds each item to the back of the list
print(x)
```
<h3>Indexing and slicing</h3>
```
x=[1,7,2,5,3,5,67,32]
print(len(x))
print(x[3])
print(x[2:5])
print(x[-1])
print(x[::-1])
```
<h3>Removing items from a list</h3>
```
x=[1,7,2,5,3,5,67,32]
x.pop() #Removes the last element from a list
print(x)
x.pop(3) #Removes element at item 3 from a list
print(x)
x.remove(7) #Removes the first 7 from the list
print(x)
```
<h3>Anything you want to remove must be in the list or the location must be inside the list</h3>
```
x.remove(20)
```
<h2>Mutablility of lists</h2>
```
y=['a','b']
x = [1,y,3]
print(x)
print(y)
y[1] = 4
print(y)
print(x)
x="Hello"
print(x,id(x))
x+=" You!"
print(x,id(x)) #x is not the same object it was
y=["Hello"]
print(y,id(y))
y+=["You!"]
print(y,id(y)) #y is still the same object. Lists are mutable. Strings are immutable
# Teaching example: Python's mutable-default-argument pitfall.
# `total=0` is an immutable int, rebound on every call, so each call to
# eggs() starts fresh. `some_list=[]` is created ONCE at function-definition
# time and shared across calls, so spam() accumulates state between calls.
# (Intentionally left "buggy" to illustrate the pitfall.)
def eggs(item,total=0):
total+=item
return total
def spam(elem,some_list=[]):
some_list.append(elem)
return some_list
print(eggs(1))   # 1
print(eggs(2))   # 2 -- fresh total each call
print(spam(1))   # [1]
print(spam(2))   # [1, 2] -- the default list is shared between calls!
```
<h1>Iteration</h1>
<h2>Range iteration</h2>
```
#The for loop creates a new variable (e.g., index below)
#range(len(x)) generates values from 0 to len(x)
x=[1,7,2,5,3,5,67,32]
for index in range(len(x)):
print(x[index])
list(range(len(x)))
```
<h3>List element iteration</h3>
```
x=[1,7,2,5,3,5,67,32]
#The for draws elements - sequentially - from the list x and uses the variable "element" to store values
for element in x:
print(element)
```
<h3>Practice problem</h3>
Write a function search_list that searches a list of tuple pairs and returns the value associated with the first element of the pair
```
def search_list(list_of_tuples, value):
    """Return the second element of the first tuple in *list_of_tuples*
    whose first element equals *value*, or None if there is no match."""
    # BUG FIX: the original iterated the global `prices` instead of the
    # `list_of_tuples` parameter, so the argument was ignored and the
    # function only worked by accident when called with `prices` itself.
    # It also fell off the end (implicitly returning None) on no match;
    # the explicit `return None` makes that contract visible.
    for pair in list_of_tuples:
        if pair[0] == value:
            return pair[1]
    return None

prices = [('AAPL', 96.43), ('IONS', 39.28), ('GS', 159.53)]
ticker = 'IONS'
print(search_list(prices, ticker))
```
<h1>Dictionaries</h1>
```
mktcaps = {'AAPL':538.7,'GOOG':68.7,'IONS':4.6}
mktcaps['AAPL'] #Returns the value associated with the key "AAPL"
mktcaps['GS'] #Raises KeyError because GS is not in mktcaps (intentional demo -- halts a plain script; contrast with .get below)
mktcaps.get('GS') #Returns None because GS is not in mktcaps (no exception)
mktcaps['GS'] = 88.65 #Adds GS to the dictionary
print(mktcaps)
del(mktcaps['GOOG']) #Removes GOOG from mktcaps
print(mktcaps)
mktcaps.keys() #Returns a view of all the keys
mktcaps.values() #Returns a view of all the values
# List indexing and slicing examples.
list1 = [1, 2, 3, 4, 5, 6, 7]
list1[0]
list1[:2]
list1[:-2]
list1[3:5]
# Nested list indexing: data[1] -> [[5, 6], [7, 8]], then [0] -> [5, 6], then [0] -> 5.
data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
print(data[1][0][0])
numbers = [1, 2, 3, 4]
numbers.append([5, 6, 7, 8]) # appends the whole list as ONE nested element
print(len(numbers)) # prints 5, not 8 -- the appended list counts as a single item
list1 = [1, 2, 3, 4, 5, 6, 7]
print(list1[0])
print(list1[:2])
print(list1[:-2])
print(list1[3:5])
dict1 = {"john":40, "peter":45}
dict2 = {"john":466, "peter":45}
dict1 > dict2 # NOTE: raises TypeError in Python 3 -- dicts do not support ordering comparisons (only == and !=)
dict1 = {"a":1, "b":2}# to delete the entry for "a":1, use ________.
#d.delete("a":1)
#dict1.delete("a")
#del dict1("a":1)
del dict1["a"] # correct answer: del with the key in square brackets
dict1
s = {1, 2, 4, 3}# which of the following will result in an exception (error)? Multiple options may be correct.
#print(s[3]) # error: sets are unordered and do not support indexing
print(max(s))
print(len(s))
#s[3] = 45 # error: sets do not support item assignment
```
| github_jupyter |
# بسم الله الرحمن الرحيم
## Information Theory Course
### Course Code: IS311T
#### Core Course
- Theory: 2
- Tutorial: 3
- TOT: 3
#### Marks:
- 25 Written
- 75 Final
- 100 Total
#### Syllabus
- Introduction of Information measures (entropy, mutual information) and basic properties
- Typical sets and the Asymptotic Equipartition Property
- Entropy rates for stochastic processes, Markov Chains
- Data Compression / Lossless Source Coding
- Huffman coding, Lempel-Ziv Compression
- Channel coding, information capacity
- The channel coding theorem for discrete memoryless channels - random coding proof, error exponents
- Converse to the channel coding theorem, joint source channel coding, feedback capacity
- Differential entropy, Gaussian noise channels
- Continuous-time Gaussian channels, band-limited channels
- Quantization/Rate Distortion theory
#### Online Resources
- Yury Polyanskiy. 6.441 Information Theory. Spring 2016. Massachusetts Institute of Technology: MIT OpenCourseWare, https://ocw.mit.edu. License: Creative Commons BY-NC-SA.
- Course on Information Theory, Pattern Recognition, and Neural Networks http://www.inference.org.uk/mackay/itila/
- https://www.itsoc.org/conferences/schools/past-schools/na-school-2009/material
- http://people.seas.harvard.edu/~madhusudan/courses/Spring2016/
- Introduction to Information Theory and Why you should care - https://recast.ai/blog/introduction-information-theory-care/
### In the Beginning
#### Let's agree on
- **Academic Discipline** - An academic discipline or academic field is a branch of knowledge. It incorporates expertise, people, projects, communities, challenges, studies, inquiry, and research areas that are strongly associated with a given scholastic subject area or college department. For example, the branches of science are commonly referred to as the scientific disciplines, e.g. physics, mathematics, and biology. Individuals associated with academic disciplines are commonly referred to as experts or specialists. Others, who may have studied liberal arts or systems theory rather than concentrating in a specific academic discipline, are classified as generalists.
- **Interdisciplinary** - or interdisciplinary studies involves the *combining of two or more academic disciplines into one activity* (e.g., a research project). It draws knowledge from several other fields like sociology, anthropology, psychology, economics etc. It is about creating something new by thinking across boundaries. It is related to an interdiscipline or an interdisciplinary field, which is an organizational unit that crosses traditional boundaries between academic disciplines or schools of thought, as new needs and professions emerge. Large engineering teams are usually interdisciplinary, as a power station or mobile phone or other project requires the melding of several specialties.
- **Transdisciplinarity** - connotes a *research strategy that crosses many disciplinary boundaries to create a holistic approach*. It applies to research efforts focused on problems that cross the boundaries of two or more disciplines, such as research on effective information systems for biomedical research (bioinformatics), and can refer to concepts or methods that were originally developed by one discipline, but are now used by several others, such as ethnography, a field research method originally developed in anthropology but now widely used by other disciplines. The Belmont Forum elaborated that a transdisciplinary approach is enabling inputs and scoping across scientific and non-scientific stakeholder communities and facilitating a systemic way of addressing a challenge. This includes initiatives that support the capacity building required for the successful transdisciplinary formulation and implementation of research actions.
- **Cross-disciplinary** - knowledge is that which *explains aspects of one discipline in terms of another*. Common examples of cross-disciplinary approaches are studies of the physics of music or the politics of literature.
#### Not to be confused with:
- **Computer Information System(s) (CIS)** - is a field studying computers and algorithmic processes, including their principles, their software and hardware designs, their applications, and their impact on society,whereas *Information Systems* emphasizes functionality over design. Several Information Systems scholars have debated the nature and foundations of Information Systems which have its roots in other reference disciplines such as Computer Science, Engineering, Mathematics, Management Science, Cybernetics, and others.
Information systems also can be defined as a collection of hardware, software, data, people and procedures that work together to produce quality information.
- **Information System** - is an organized system for the collection, organization, storage and communication of information. More specifically, it is the study of complementary networks that people and organizations use to collect, filter, process, create and distribute data. Further, "Information System (IS) is a group of components that interact to produce information. It focuses on the internal rather than the external." Information system can also be described as a combination of hardware, software, data, business process and functions which can be used to increase efficiency and management of an organization. Information Systems is the expression used to describe an Automated System (which may be referred to as a Computerized Information System), be it manual, which covers people, machines or organized methods to collect, process, transmit and disseminate data representing information for the user or client.
- **Information Science** - a field primarily concerned with the analysis, collection, classification, manipulation, storage, retrieval, movement, dissemination, and protection of information. [1] Practitioners within and outside the field study application and usage of knowledge in organizations along with the interaction between people, organizations, and any existing information systems with the aim of creating, replacing, improving, or understanding information systems. Historically, information science is associated with computer science, library science, and telecommunications.[2] However, information science also incorporates aspects of diverse fields such as archival science, cognitive science, commerce, law, museology, management, mathematics, philosophy, public policy, and social sciences.
- **Library science** (often termed library studies, library and information science, bibliothecography, library economy) is an interdisciplinary or multidisciplinary field that applies the practices, perspectives, and tools of management, information technology, education, and other areas to libraries; the collection, organization, preservation, and dissemination of information resources; and the political economy of information. Martin Schrettinger, a Bavarian librarian, coined the discipline within his work (1808–1828) Versuch eines vollständigen Lehrbuchs der Bibliothek-Wissenschaft oder Anleitung zur vollkommenen Geschäftsführung eines Bibliothekars. Rather than classifying information based on nature-oriented elements, as was previously done in his Bavarian library, Schrettinger organized books in alphabetical order. The first American school for library science was founded by Melvil Dewey at Columbia University in 1887.
- **Informatics** is a branch of information engineering. It involves the practice of information processing and the engineering of information systems, and as an academic field it is an applied form of information science. The field considers the interaction between humans and information alongside the construction of interfaces, organisations, technologies and systems. As such, the field of informatics has great breadth and encompasses many subspecialties, including disciplines of computer science, information systems, information technology and statistics. Since the advent of computers, individuals and organizations increasingly process information digitally. This has led to the study of informatics with computational, mathematical, biological, cognitive and social aspects, including study of the social impact of information technologies
**References**
1. Stock, W.G., & Stock, M. (2013). Handbook of Information Science. Berlin, Boston, MA: De Gruyter Saur.
2. Yan, Xue-Shan (2011-07-23). Information Science: Its Past, Present and Future (PDF). doi:10.3390/info2030510. Retrieved 2017-11-05.

## Information Theory
- studies the quantification, storage, and communication of information.
- **Quantification** - In mathematics and empirical science, quantification (or quantitation) is the act of counting and measuring that maps human sense observations and experiences into quantities. Quantification in this sense is fundamental to the scientific method.
- **Computer data storage** - often called storage or memory, is a technology consisting of computer components and recording media that are used to retain digital data. It is a core function and fundamental component of computers.
- **Communication** - Telecommunication is the transmission of signs, signals, messages, words, writings, images and sounds or information of any nature by wire, radio, optical or electromagnetic systems.Telecommunication occurs when the exchange of information between communication participants includes the use of technology. It is transmitted either electrically over physical media, such as cables, or via electromagnetic radiation. Such transmission paths are often divided into communication channels which afford the advantages of multiplexing. The term telecommunications is often used in its plural form because it involves many different technologies.
- **Information** is any entity or form that provides the answer to a question of some kind or resolves uncertainty. It is thus related to data and knowledge, as data represents values attributed to parameters, and knowledge signifies understanding of real things or abstract concepts. As it regards data, the information's existence is not necessarily coupled to an observer (it exists beyond an event horizon, for example), while in the case of knowledge, the information requires a cognitive observer.
### Let's focus more on Information - Part 1
### Code / Encode / Decode / Encrypt
- Information is conveyed either as the content of a message or through direct or indirect observation. That which is perceived can be construed as a message in its own right, and in that sense, information is always conveyed as the content of a message.
- Information can be encoded into various forms for transmission and interpretation (for example, information may be encoded into a sequence of signs, or transmitted via a signal). It can also be encrypted for safe storage and communication.
- In communications and information processing, **code** is a system of rules to convert information—such as a letter, word, sound, image, or gesture—into another form or representation, sometimes shortened or secret, for communication through a communication channel or storage in a storage medium. An early example is the invention of language, which enabled a person, through speech, to communicate what he or she saw, heard, felt, or thought to others. But speech limits the range of communication to the distance a voice can carry, and limits the audience to those present when the speech is uttered. The invention of writing, which converted spoken language into visual symbols, extended the range of communication across space and time.
- The process of **encoding** converts information from a source into symbols for communication or storage.
- **Decoding** is the reverse process, converting code symbols back into a form that the recipient understand, such as English or Spanish.
- One reason for coding is to enable communication in places where ordinary plain language, spoken or written, is difficult or impossible. For example, semaphore, where the configuration of flags held by a signaler or the arms of a semaphore tower encodes parts of the message, typically individual letters and numbers. Another person standing a great distance away can interpret the flags and reproduce the words sent.
- 
- In cryptography, **encryption** is the process of encoding a message or information in such a way that only authorized parties can access it and those who are not authorized cannot.
- Encryption does not itself prevent interference, but denies the intelligible content to a would-be interceptor.
- In an encryption scheme, the intended information or message, referred to as plaintext, is encrypted using an encryption algorithm – a cipher – generating ciphertext that can be read only if decrypted.
- For technical reasons, an encryption scheme usually uses a pseudo-random encryption key generated by an algorithm.
- It is in principle possible to decrypt the message without possessing the key, but, for a well-designed encryption scheme, considerable computational resources and skills are required.
- An authorized recipient can easily decrypt the message with the key provided by the originator to recipients but not to unauthorized users.
### Let's focus more on Information - Part 2
### Transmission / Signal
- In telecommunications, transmission (abbreviations: TX, Xmit) is the process of sending and propagating an analogue or digital information signal over a physical point-to-point or point-to-multipoint transmission medium, either wired, optical fiber or wireless.
- One example of transmission is the sending of a signal with limited duration, for example a block or packet of data, a phone call, or an email.
- Transmission technologies and schemes typically refer to physical layer protocol duties such as modulation, demodulation, line coding, equalization, error control, bit synchronization and multiplexing, but the term may also **involve higher-layer protocol duties**, for example, digitizing an analog message signal, and data compression.
- Transmission of a digital message, or of a digitized analog signal, is known as digital communication.
### Signal
- In communication systems, signal processing, and electrical engineering, a signal is a **function that "conveys information about the behavior or attributes of some phenomenon"**.
- A signal may also be defined as an **"observable change in a quantifiable entity"**.
- In the physical world, any **quantity exhibiting variation in time or variation in space (such as an image) is potentially a signal that might provide information on the status of a physical system, or convey a message between observers, among other possibilities.**
- The IEEE Transactions on Signal Processing states that the term "signal" includes audio, video, speech, image, communication, geophysical, sonar, radar, medical and musical signals.
- In a later development, a signal is redefined as an **"observable change in a quantifiable entity"**; here, anything which is only a function of space, such as an image, is excluded from the category of signals. Also, it is stated that a signal may or may not contain any information.
## Practical Aspects of the Course
#### Using Python and NumPy through Jupyter Notebook
#### Why we use Jupyter Notebook
#### Why use Python with Information Theory
- https://www.frontiersin.org/articles/10.3389/neuro.11.004.2009/full
#### More information about NumPy
#### Steps to run Jupyter Notebook
- Make sure you have Python installed on your Operating System
- Run the following command on the terminal
- pip3 install jupyterlab
- jupyter lab
- You will get something like this on the terminal: http://localhost:8888/?token=SOME-VALUE-GOES-HERE
- Copy the previous URL, open your browser, and paste the URL
- You are ready to Go!
```
# Import Modules
import sys  # stdlib: interpreter / version information
# Scientific Computing in Python
import numpy as np
# Report the environment versions (useful for reproducibility of results)
print('Python Version: {}'.format(sys.version))
print('NumPy Version: {}'.format(np.__version__))
# Make sure you get the messages before moving to the next step
```
## Let's review some Linear Algebra
### Scalar
- (x) - a single number or value
- denoted by a lower case letter, (x) in this case
### Vector
- (**x**) an array of numbers, either in a row or in a column, identified by an index
- denoted by a lower case bold letter
- might also denoted by a lower case letter, with bar on the top ($\bar{x}$)
- Indexing is Zero based
- 1-Dimensional
### Matrices
- (**X**) a 2-D array of numbers, where each element is identified by two indices.
- denoted by a bold and capital case letter
- 2-Dimensional
- Indexing is Zero based
- Dimensions are reported in (Rows, Columns) format
- $\begin{bmatrix}
1 & 2\\
3 & 4\\
\end{bmatrix}$ - dimensions (2,2)
#### Matrix Operations
- Given the following Matrices
- **A** = $\begin{bmatrix} 1 & 2 & 3\\ 4 & 5 & 7 \\ \end{bmatrix}$
- **B** = $\begin{bmatrix} 4 & 5 & 6 \\ 7 & 8 & 9 \\ \end{bmatrix}$
- Addition: A + B = $\begin{bmatrix} 5 & 7 & 9 \\ 11 & 13 & 16 \\ \end{bmatrix}$
- Subtraction: A - B
- Multiplication: A * B
### Tensors
```
# define a scalar: a single value
x = 6
x
# define a vector: a 1-D array of values
x = np.array((1,2,3,4,5))
x
# Get vector properties / characteristics
print('Vector x Dimensions: {}'.format(x.shape))
print('Vector x Size: {}'.format(x.size))
# define a Matrix as a regular 2-D ndarray.
# Fix: np.matrix is deprecated by NumPy in favour of 2-D np.array;
# .shape and .size printed below are identical for both.
x = np.array(([1,2,3,10],[4,5,6,11],[7,8,9,12]))
# Get Matrix properties / characteristics
# Matrix size: no. of elements in matrix
# Matrix Dimensions (row, col)
print('Matrix x Dimensions: {}'.format(x.shape))
print('Matrix x Size: {}'.format(x.size))
# define Matrix of any dimension
np.ones((3,4))
np.zeros((2,2))
# Tensors: arrays with 3 or more dimensions
np.ones((3,3,3))
# indexing
x = np.ones((5,5), dtype=int)
# note the difference btween x and print(x) - try it yourself
print(x)
# Element-wise arithmetic on equal-shaped arrays
a = np.array((1,2,3))
b = np.array((4,5,6))
print(a+b)
print(a-b)
print(a*b)
```
| github_jupyter |
<h1>Data increase with Enormous rate
```
# Render a local illustration of data-volume growth.
# Assumes 'Data growth.PNG' sits in the notebook's working directory.
from IPython.display import display, Image
display(Image(filename='Data growth.PNG'))
```
<h1> What is Data Mining?
Data mining is a rapidly growing field that is concerned with developing techniques to assist managers and decision makers to make
intelligent use of these repositories. The goal is to discover <b>meaningful new correlations, patterns and trends</b> by sifting through large amounts of data stored in repositories, using techniques developed in <b>pattern recognition, machine learning, artificial intelligence, statistics and mathematics</b>
```
# Render a local illustration of the data-mining process.
# Assumes 'Data mining process.png' sits in the notebook's working directory.
from IPython.display import display, Image
display(Image(filename='Data mining process.png'))
```
<h2>Sample data you can play with
Lending Club: <br>
These files contain complete loan data for all loans issued through the <b>2007-2015</b>, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. The file containing loan data through the "present" contains complete loan data for all loans issued through the previous completed calendar quarter. Additional features include credit scores, number of finance inquiries, address including zip codes, and state, and collections among others. The file is a matrix of about <b>890 thousand observations and 75 variables</b>. A data dictionary is provided in a separate file.
https://storage.googleapis.com/kaggle-data-sets/34/334209/bundle/archive.zip?GoogleAccessId=web-data@kaggle-161607.iam.gserviceaccount.com&Expires=1582857554&Signature=CTb6MbE9v3ss2O9D8y0ZW%2FNSG40Og52KFjVzC3wYumBYZCtjOQC1RdEZhCDDR%2FpjSCVcQf8tFSUgqk%2FQnugwao4ClY6%2BoWSwflkslT%2BOMqVkCH8eEE%2BgsYPc8rEoYwqRQvO9Y2NezaRwr3iFYbkRpKoNsQlbnBMLPbNwXcOujuajDQLg64QslEH%2BZB%2FjZdFI3R87PQ7ORoa%2FEOEg0CDc4zt%2BOYJfzJSLCuGGNPZW%2F6yBTzlZG%2FW2FdY7P2tylnC7rmCaiXREDZ5fiHHRy8LQ34iM7irztZiWu0aBPLKd%2BouSYa3WnAgfJ4oO8TTTDrkufViLkjScr2l%2BG12s8anAUQ%3D%3D&response-content-disposition=attachment%3B+filename%3Dlending-club-loan-data.zip
Movie Review Data:<br>
This page is a distribution site for movie-review data for use in sentiment-analysis experiments. Available are collections of movie-review documents <b>labeled with respect to their overall sentiment polarity</b> (positive or negative) or <b>subjective rating</b> (e.g., "two and a half stars") and sentences labeled with respect to their subjectivity status (subjective or objective) or polarity. These data sets were introduced in the following papers:
http://www.cs.cornell.edu/people/pabo/movie-review-data/
<h2>What Kinds of Patterns Can Be Mined
<h3>Frequent Patterns, Associations, and Correlations
1. Frequent patterns
2. Association analysis
<h3>Classification and Regression for Predictive Analysis
1. Classification
2. Regression
3. Cluster analysis
4. Outlier Analysis
<h3>Are All Patterns Interesting?
Causation vs. correlation
<h3>Most time, world can be explained by common sense
<h1>Google Cloud Guidance
Upload data into Cloud Storage
https://cloud.google.com/storage/docs/quickstart-console?hl=zh-tw
Link the Cloud storage data with Big query
https://cloud.google.com/bigquery/external-data-cloud-storage
Set up Jupyter notebook, and link it with Big query data warehouse
https://jingsblog.com/2018/11/27/connect-your-jupyter-notebook-to-the-bigquery/
```
# Core analysis stack.
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mpl
import seaborn as sns
import matplotlib.pyplot as plt
# Display options: show wide/long frames without truncation.
pd.options.display.max_rows = 999
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
# Fix: pd.set_option('max_colwidth', -1) is deprecated (and rejected by
# modern pandas); None is the supported way to disable width truncation.
pd.set_option('display.max_colwidth', None)
import os
# Service-account credentials for Google Cloud (machine-specific path).
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/tianboli1987/.config/gcloud/application_default_credentials.json'
from google.cloud import bigquery
# Pull the full loan table from BigQuery into a local DataFrame.
bigquery_client = bigquery.Client(project='double-airport-256012')
query1 = """select * from load.loan3
"""
query_job = bigquery_client.query(query1)
df = query_job.to_dataframe()
df.head()
df.describe()
```
<h2>Data preprocessing
Data quality is defined in terms of accuracy, completeness, consistency, timeliness,
believability, and interpretabilty. These qualities are assessed based on the intended
use of the data.
1. Data Cleaning
2. Data Integration
3. Data Transformation and Data Discretization
```
# Binary classification target: 1 when the account is currently delinquent
# (acc_now_delinq != 0), else 0.
df["target"] = np.where(df["acc_now_delinq"]== 0, 0, 1)
# Class balance: row count (via the uuid column) per target value.
df_1= df["uuid"].groupby(df['target']).count()
df_1
list_dfn = list(df.columns)
# Drop identifier, free-text and date columns that should not be used as raw
# model features (IDs/URLs carry no signal; dates would need engineering).
df1 = df.drop(['uuid', 'emp_title','title','zip_code',
               'addr_state','id', 'member_id', 'url',
               'policy_code','issue_d', 'earliest_cr_line',
               'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d',
               'sec_app_earliest_cr_line', 'hardship_start_date',
               'hardship_end_date', 'payment_plan_start_date',
               'debt_settlement_flag_date', 'settlement_date','desc'], axis=1)
# df3: categorical columns plus the target (joined on the index);
# df4: numeric columns only.
df3 = pd.merge(df1.select_dtypes(include=['object','category']), df1[['target']], left_index=True, right_index=True)
df4 = df1.select_dtypes(include='number')
```
<h2>Association Rules
<h3>Weight of Evidence and Information Value
The weight of evidence tells the predictive power of an independent variable in relation to the dependent variable.
It's good to understand the concept of WOE in terms of events and non-events. It is calculated by taking the natural logarithm (log to base e) of division of % of non-events and % of events.
```
# Show the WOE formula illustration (requires WOE.png next to the notebook).
from IPython.display import display, Image
display(Image(filename='WOE.png'))
# scorecardpy: credit-scorecard utilities (WOE binning, Information Value).
import scorecardpy as sc
# Automatic WOE binning of every df1 feature against the binary target.
# `bins` is keyed by variable name -- presumably one binning DataFrame per
# variable (per scorecardpy's woebin API); verify against its docs.
bins = sc.woebin(df1, y="target")
# uuid, emp_title, title, zip_code, addr_state
list_dfn = list(df1.columns)
# Stack the per-variable binning tables into one long DataFrame.
# NOTE(review): the comprehension reuses `list_dfn` as its loop variable,
# shadowing the column list assigned just above.
full_woe = pd.concat([bins[list_dfn] for list_dfn in bins.keys()]).reset_index(drop=True)
full_woe.head(10)
# One row per variable with its total Information Value, sorted descending.
full_iv = full_woe[["variable","total_iv"]].drop_duplicates().sort_values(by=['total_iv'],ascending=False).reset_index(drop=True)
# Keep only variables whose IV exceeds 0.5 (author's screening threshold).
full_iv_selected = full_iv[full_iv["total_iv"] > 0.5]
list_sel = list(full_iv_selected["variable"])
import matplotlib.pyplot as plt
# One WOE-by-bin bar chart for each selected (high-IV) variable.
for i in list_sel:
    print(i)
    ax = sns.barplot(x="bin", y="woe", data=full_woe[full_woe["variable"]== i])
    plt.show(block=False)
# Broader selection: every variable with a strictly positive IV.
full_iv_2 = full_iv[full_iv["total_iv"] > 0]
list_sel2 = list(full_iv_2["variable"])
import seaborn as sns
import matplotlib.pyplot as plt
# Pairwise correlation heatmap of the high-IV (> 0.5) columns.
corr = df1[list_sel].corr()
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns,annot=True, linewidths=.5, ax=ax)
# Replace raw feature values with their WOE-transformed values.
df2 = sc.woebin_ply(df1, bins)
df2.head()
# Categorical variables that both passed the IV screen and exist in df3.
list_cat = list(set(list_sel2) & set(list(df3.columns)))
for i in list_cat:
    # Per-variable binning table: which raw category values ("breaks")
    # belong to which bin.
    a = bins[i][['variable','breaks']]
    a['breaks'] = a["breaks"].replace("%", "",regex=True)
    # Split comma-separated category lists into one column per category.
    f =pd.concat([a[['breaks']], a["breaks"].str.split(',', expand=True)], axis=1)
    e1 = f.set_index('breaks').T
    # Build a raw-category -> bin-label mapping.
    # NOTE(review): the loop variable `a` shadows the DataFrame `a` above,
    # and DataFrame.iteritems() was removed in pandas 2.0 (use .items()).
    mydict = {}
    for a, b in e1.iteritems():
        c = b.to_numpy().tolist()
        for j in range(len(c)):
            if c[j] is not None:
                currentid = c[j]
                currentvalue = a
                mydict.setdefault(currentid, [])
                mydict[currentid].append(currentvalue)
    df3[str(i)+'_bin'] = df3[i].map(mydict)
    # Strip list brackets/quotes from the mapped bin label.
    # NOTE(review): '\[|\]|\'' triggers an invalid-escape-sequence warning;
    # a raw string r'\[|\]|\'' is the intended spelling.
    df3['bin_'+str(i)] = df3[str(i)+'_bin'].astype(str).str.replace('\[|\]|\'', '')
df3.head()
# Per-category count plots, faceted by target class, for each selected
# categorical variable.
list_cat = list(set(list_sel2) & set(list(df3.columns)))
for i in list_cat:
    figsize=(20, 16)  # NOTE(review): assigned but never used below
    g = sns.FacetGrid(df3, col="target", height=10, aspect=1)
    g = g.map(sns.countplot, "bin_"+str(i),color="r",order=np.sort(df3['bin_'+str(i)].astype(str).unique()))
# Numeric variables that passed the IV screen and exist in df4.
list_num = list(set(list_sel2) & set(list(df4.columns)))
for i in list_num:
    a = bins[i][['breaks']]
    # Keep only the finite, non-missing break points.
    b = a[(a['breaks'] != 'inf') & (a['breaks'] != '-inf')&(a['breaks'] != 'missing')]["breaks"]
    c = b.values.astype(float).tolist()
    if len(c) != 0:
        innercutpoints = [-np.inf] + c + [np.inf] # add -Inf, +Inf to cutpoints
        # De-duplicate while preserving order.
        # NOTE(review): the inner dict.fromkeys already de-duplicates; the
        # outer application is redundant.
        cutpoints = list(dict.fromkeys(list(dict.fromkeys(innercutpoints))))
        df4['bin_'+str(i)] = pd.cut(df4[i], bins = cutpoints, duplicates='drop')
df4.head()
# Scatter of loan amount vs. recent new trade lines.
# NOTE(review): PairGrid's `size` parameter is deprecated in newer seaborn
# (renamed to `height`).
g = sns.PairGrid(df4, y_vars=["loan_amnt"], x_vars=["num_tl_op_past_12m"], size=10)
g.map(plt.scatter)
```
| github_jupyter |
## Notebook to ensure tip order is correct
#### Concerns:
1. Are the names being swapped correctly when changing to msprime naming (1-ntips) and then back to the names on the species tree (toytree)?
2. Is the sequence array being ordered correctly given the names applied to the trees?
3. For balanced splits (cherries) is arbitrary ordering of the tree consequential?
#### How this is demonstrated in this notebook:
1. We show that the relationships in the species tree match those in the gene trees, and this is made even more clear by setting different Ne values on different branches so that their coalescent times clearly vary.
2. We show that the inferred gene tree for a locus matches the true genealogy, and this is made even more clear by setting different Ne values on different branches so that their coalescent times clearly vary.
3. Again, by setting different Ne values on different branches it is clear that the topology and coalescent times are being used correctly. We show this by testing on both balanced and imbalanced species trees.
```
import ipcoal
import toytree
import toyplot
import numpy as np
```
### 1. Name ordering
We can see clearly in this example that the name ordering is working correctly. Here I sample three individuals from each population in a tree with 8 populations. I then set the Ne to be large or small for one of each pair of sister species. We can see that the odd-numbered tips have large Ne, and the even-numbered tips have small Ne and thus short coalescent times. The relationships and coalescent times appear as expected.
#### imbalanced tree
This looks correct.
```
# Load an imbalanced (caterpillar) species tree with 8 tips and a total
# tree height of 1e6 generations.
tree = toytree.rtree.imbtree(8, treeheight=1e6)
# Set Ne: small default (1e3) everywhere, large (2e5) on selected nodes so
# their coalescent times are visibly longer.
# NOTE(review): the original comment said "odd number tip names" but the
# selected idx values are {7, 5, 3, 0} -- node 0 is even; confirm intent.
tree = tree.set_node_values(
    attr="Ne",
    default=1e3,
    values={7: 2e5, 5:2e5, 3:2e5, 0:2e5},
)
# Load the ipcoal model (3 samples per tip; fixed seed for reproducibility)
# and simulate 3 genealogies.
model = ipcoal.Model(tree, samples=3, seed=333)
model.sim_trees(3);
# Plot the species tree and a sampled genealogy side by side.
canvas = toyplot.Canvas(width=700, height=400)
ax0 = canvas.cartesian(bounds=("10%", "45%", "10%", "90%"))
ax1 = canvas.cartesian(bounds=("55%", "90%", "10%", "90%"))
ax0.show = False; ax1.show = False  # hide axis lines/ticks for tree drawings
# Draw the species tree with node idx labels and edge widths scaled by Ne.
tre = toytree.tree(model.tree)
tre.draw(
    axes=ax0,
    node_labels=tre.get_node_values("idx", 1, 1),
    edge_widths="Ne",
);
# Draw the first simulated genealogy.
tre = toytree.tree(model.df.genealogy[0])
tre.draw(axes=ax1);
```
#### balanced tree
```
# Load a balanced species tree with 8 tips and total height 1e6 generations.
tree = toytree.rtree.baltree(8, treeheight=1e6)
# Set Ne: small default (1e3), large (2e5) for the odd-numbered tips so one
# tip of each cherry has visibly longer coalescent times.
tree = tree.set_node_values(
    attr="Ne",
    default=1e3,
    values={7: 2e5, 5:2e5, 3:2e5, 1:2e5},
)
# Load the ipcoal model (3 samples per tip, fixed seed) and simulate 3 loci.
model = ipcoal.Model(tree, samples=3, seed=333)
model.sim_loci(3);
# Plot the species tree and a sampled genealogy side by side.
canvas = toyplot.Canvas(width=700, height=400)
ax0 = canvas.cartesian(bounds=("10%", "45%", "10%", "90%"))
ax1 = canvas.cartesian(bounds=("55%", "90%", "10%", "90%"))
ax0.show = False; ax1.show = False  # hide axis lines/ticks for tree drawings
# Draw the species tree with node idx labels and edge widths scaled by Ne.
tre = toytree.tree(model.tree)
tre.draw(
    axes=ax0,
    node_labels=tre.get_node_values("idx", 1, 1),
    edge_widths="Ne",
);
# Draw the first simulated genealogy.
tre = toytree.tree(model.df.genealogy[0])
tre.draw(axes=ax1);
```
### (2) Are the sequences ordered correctly?
This looks like no. And I'm starting to worry that there is an error either in rooting, or in evolving data along an edge near the root, since this edge length is basically missing...
#### imbalanced tree
The edge lengths look very good, but the tip names are wrong.
```
# Load an imbalanced species tree with 8 tips and total height 1e6.
tree = toytree.rtree.imbtree(8, treeheight=1e6)
# Large Ne (2e5) on selected nodes, small default (1e3) elsewhere.
tree = tree.set_node_values(
    attr="Ne",
    default=1e3,
    values={7: 2e5, 5:2e5, 3:2e5, 0:2e5},
)
# Simulate ONE long locus (100 kb) with recombination disabled so the whole
# locus shares a single true genealogy, then infer a gene tree from the
# simulated sequences.
model = ipcoal.Model(tree, samples=3, seed=123, recomb=0)
model.sim_loci(1, 100000)
model.infer_gene_trees()
# Plot the true genealogy next to the inferred gene tree.
canvas = toyplot.Canvas(width=700, height=400)
ax0 = canvas.cartesian(bounds=("10%", "45%", "10%", "90%"))
ax1 = canvas.cartesian(bounds=("55%", "90%", "10%", "90%"))
ax0.show = False; ax1.show = False
# Draw the true genealogy.
toytree.tree(model.df.genealogy[0]).draw(axes=ax0);
# Draw the inferred tree, rooted on the samples matching tip name "r7".
tre = toytree.tree(model.df.inferred_tree[0])
rtre = tre.root(wildcard="r7")
rtre.draw(axes=ax1);
```
#### balanced tree
Rooting edge length problem here needs to be fixed in toytree...
```
# Load a balanced species tree with 8 tips and total height 1e6.
tree = toytree.rtree.baltree(8, treeheight=1e6)
# Large Ne (2e5) on the odd-numbered tips, small default (1e3) elsewhere.
tree = tree.set_node_values(
    attr="Ne",
    default=1e3,
    values={7: 2e5, 5:2e5, 3:2e5, 1:2e5},
)
# One 100 kb locus without recombination (single underlying genealogy),
# then infer the gene tree from the simulated alignment.
model = ipcoal.Model(tree, samples=3, seed=333, recomb=0)
model.sim_loci(1, 100000)
model.infer_gene_trees()
# Plot the true genealogy next to the inferred gene tree.
canvas = toyplot.Canvas(width=700, height=400)
ax0 = canvas.cartesian(bounds=("10%", "45%", "10%", "90%"))
ax1 = canvas.cartesian(bounds=("55%", "90%", "10%", "90%"))
ax0.show = False; ax1.show = False
# Draw the true genealogy.
toytree.tree(model.df.genealogy[0]).draw(axes=ax0);
# Draw the inferred tree, rooted on all samples from tips r4-r7.
tre = toytree.tree(model.df.inferred_tree[0])
rtre = tre.root(regex="r[4-7].")
rtre.draw(axes=ax1);
```
| github_jupyter |
# Image Classification and Object Localization
In this lab, you'll build a CNN from scratch to:
- classify the main subject in an image
- localize it by drawing bounding boxes around it.
You'll use the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset to synthesize a custom dataset for the task:
- Place each "digit" image on a black canvas of width 75 x 75 at random locations.
- Calculate the corresponding bounding boxes for those "digits".
The bounding box prediction can be modelled as a "regression" task, which means that the model will predict a numeric value (as opposed to a category).
## Imports
```
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
```
# Visualization Utilities
These functions are used to draw bounding boxes around the digits.
```
#@title Plot Utilities for Bounding Boxes [RUN ME]
im_width = 75
im_height = 75
use_normalized_coordinates = True
def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color=(),
                                       thickness=1,
                                       display_str_list=()):
  """Draws bounding boxes on an image given as a numpy array.

  Args:
    image: a numpy array object.
    boxes: a 2 dimensional numpy array of [N, 4]: (xmin, ymin, xmax, ymax).
      The coordinates are in normalized format between [0, 1].
      (This matches the [xmin, ymin, xmax, ymax] order produced by the
      dataset pipeline in this notebook.)
    color: sequence of colors, one per box (e.g. ['red', 'green']).
      Changed from a mutable default list to an immutable tuple.
    thickness: line thickness. Default value is 1.
    display_str_list: a list of strings, one per bounding box.

  Returns:
    a numpy array (RGBA) copy of the image with the boxes drawn on it.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  # Convert to a PIL RGBA image so PIL.ImageDraw can render the boxes on it.
  image_pil = PIL.Image.fromarray(image)
  rgbimg = PIL.Image.new("RGBA", image_pil.size)
  rgbimg.paste(image_pil)
  draw_bounding_boxes_on_image(rgbimg, boxes, color, thickness,
                               display_str_list)
  return np.array(rgbimg)
def draw_bounding_boxes_on_image(image,
                                 boxes,
                                 color=(),
                                 thickness=1,
                                 display_str_list=()):
  """Draws bounding boxes on a PIL image in place.

  Args:
    image: a PIL.Image object.
    boxes: a 2 dimensional numpy array of [N, 4]: (xmin, ymin, xmax, ymax).
      The coordinates are in normalized format between [0, 1].
      (This matches the [xmin, ymin, xmax, ymax] order produced by the
      dataset pipeline in this notebook.)
    color: sequence of colors, one per box.
      Changed from a mutable default list to an immutable tuple.
    thickness: line thickness. Default value is 1.
    display_str_list: a list of strings, one per bounding box.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  boxes_shape = boxes.shape
  if not boxes_shape:
    # 0-d array: nothing to draw
    return
  if len(boxes_shape) != 2 or boxes_shape[1] != 4:
    raise ValueError('Input must be of size [N, 4]')
  for i in range(boxes_shape[0]):
    # boxes rows are (xmin, ymin, xmax, ymax); reorder into the helper's
    # (ymin, xmin, ymax, xmax) argument order
    draw_bounding_box_on_image(image, boxes[i, 1], boxes[i, 0], boxes[i, 3],
                               boxes[i, 2], color[i], thickness, display_str_list[i])
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=1,
                               display_str=None,
                               use_normalized_coordinates=True):
  """Adds one bounding box to a PIL image in place.

  Args:
    image: a PIL.Image object.
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 1.
    display_str: string associated with this box (accepted but not drawn).
    use_normalized_coordinates: If True (default), treat coordinates
      ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
      coordinates as absolute.
  """
  pen = PIL.ImageDraw.Draw(image)
  width, height = image.size
  if use_normalized_coordinates:
    # scale fractional coordinates up to pixel positions
    left, right = xmin * width, xmax * width
    top, bottom = ymin * height, ymax * height
  else:
    left, right, top, bottom = xmin, xmax, ymin, ymax
  # trace the rectangle as a closed polyline back to the starting corner
  corners = [(left, top), (left, bottom), (right, bottom),
             (right, top), (left, top)]
  pen.line(corners, width=thickness, fill=color)
```
These utilities are used to visualize the data and predictions.
```
#@title Visualization Utilities [RUN ME]
"""
This cell contains helper functions used for visualization
and downloads only.
You can skip reading it, as there is very
little Keras or Tensorflow related code here.
"""
# Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
  """Pull one batch from each dataset and return the contents as numpy arrays.

  Args:
    training_dataset: batched tf.data.Dataset of (digits, (labels, bboxes)).
    validation_dataset: tf.data.Dataset batched as one 10000-item batch.
    N: number of training digits to extract.

  Returns:
    (training_digits, training_labels, training_bboxes,
     validation_digits, validation_labels, validation_bboxes),
    with the one-hot labels converted back to integer class ids.
  """
  # get one batch from each: 10000 validation digits, N training digits
  batch_train_ds = training_dataset.unbatch().batch(N)
  # eager execution: loop through datasets normally
  if tf.executing_eagerly():
    for validation_digits, (validation_labels, validation_bboxes) in validation_dataset:
      validation_digits = validation_digits.numpy()
      validation_labels = validation_labels.numpy()
      validation_bboxes = validation_bboxes.numpy()
      break  # only the first (full) batch is needed
    for training_digits, (training_labels, training_bboxes) in batch_train_ds:
      training_digits = training_digits.numpy()
      training_labels = training_labels.numpy()
      training_bboxes = training_bboxes.numpy()
      break  # only the first batch of N digits is needed
  # these were one-hot encoded in the dataset
  validation_labels = np.argmax(validation_labels, axis=1)
  training_labels = np.argmax(training_labels, axis=1)
  return (training_digits, training_labels, training_bboxes,
          validation_digits, validation_labels, validation_bboxes)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
  """Render n digit images (0-9, cycling) from local matplotlib fonts.

  Args:
    n: number of digit images to generate.

  Returns:
    font_digits: numpy array [n, 75*75] of float32 pixel values in [0, 1].
    font_labels: list of n integer labels (i % 10).
  """
  font_labels = []
  img = PIL.Image.new('LA', (75*n, 75), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
  font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
  font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
  d = PIL.ImageDraw.Draw(img)
  for i in range(n):
    font_labels.append(i%10)
    # digits past the first 10 switch to font2 with a slight vertical offset
    d.text((7+i*75,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
  font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
  # split the wide strip into n individual 75x75 tiles, then flatten each tile
  font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [75, 75*n]), n, axis=1), axis=0), [n, 75*75])
  return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits_with_boxes(digits, predictions, labels, pred_bboxes, bboxes, iou, title):
  """Display a random row of 10 digits with their boxes and IoU annotations.

  Args:
    digits: [M, 75*75] array of flattened digit images in [0, 1].
    predictions: predicted class ids, one per digit.
    labels: true class ids, one per digit.
    pred_bboxes: predicted boxes [M, 4]; pass an empty array to skip them.
    bboxes: true boxes [M, 4]; pass an empty array to skip them.
    iou: per-digit IoU values; pass an empty array to skip the annotation.
    title: figure title.

  NOTE(review): reads the module-level global `iou_threshold`, which is
  defined in a later cell -- call this only after that cell has run.
  """
  n = 10
  # sample n random digits to display (np.random.choice samples with replacement)
  indexes = np.random.choice(len(predictions), size=n)
  n_digits = digits[indexes]
  n_predictions = predictions[indexes]
  n_labels = labels[indexes]
  n_iou = []
  if len(iou) > 0:
    n_iou = iou[indexes]
  if (len(pred_bboxes) > 0):
    n_pred_bboxes = pred_bboxes[indexes,:]
  if (len(bboxes) > 0):
    n_bboxes = bboxes[indexes,:]
  # rescale to [0, 255] pixel values and restore the 75x75 image shape
  n_digits = n_digits * 255.0
  n_digits = n_digits.reshape(n, 75, 75)
  fig = plt.figure(figsize=(20, 4))
  plt.title(title)
  plt.yticks([])
  plt.xticks([])
  for i in range(10):
    ax = fig.add_subplot(1, 10, i+1)
    bboxes_to_plot = []
    if (len(pred_bboxes) > i):
      bboxes_to_plot.append(n_pred_bboxes[i])
    if (len(bboxes) > i):
      bboxes_to_plot.append(n_bboxes[i])
    # boxes are colored in list order: first 'red', second 'green'
    # (predicted box first, then true box, when both are present)
    img_to_draw = draw_bounding_boxes_on_image_array(image=n_digits[i], boxes=np.asarray(bboxes_to_plot), color=['red', 'green'], display_str_list=["true", "pred"])
    plt.xlabel(n_predictions[i])
    plt.xticks([])
    plt.yticks([])
    if n_predictions[i] != n_labels[i]:
      # highlight misclassified digits with a red label
      ax.xaxis.label.set_color('red')
    plt.imshow(img_to_draw)
    if len(iou) > i :
      color = "black"
      if (n_iou[i][0] < iou_threshold):
        color = "red"
      ax.text(0.2, -0.3, "iou: %s" %(n_iou[i][0]), color=color, transform=ax.transAxes)
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
  """Plot the training and validation curves of one metric.

  Reads the module-level `history` object returned by model.fit().
  """
  plt.title(title)
  plt.ylim(0, ylim)
  val_name = 'val_' + metric_name
  for series, hue in ((metric_name, 'blue'), (val_name, 'green')):
    plt.plot(history.history[series], color=hue, label=series)
```
## Selecting Between Strategies
### TPU or GPU detection
Depending on the hardware available, you'll use different distribution strategies. For a review on distribution strategies, please check out the second course in this specialization ["Custom and Distributed Training with TensorFlow"](https://www.coursera.org/learn/custom-distributed-training-with-tensorflow), week 4, "Distributed Training".
- If the TPU is available, then you'll be using the TPU Strategy.
Otherwise:
- If more than one GPU is available, then you'll use the Mirrored Strategy
- If one GPU is available or if just the CPU is available, you'll use the default strategy.
```
# Detect hardware
try:
  tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
except ValueError:
  # no TPU available in this runtime
  tpu = None
gpus = tf.config.experimental.list_logical_devices("GPU")
# Select appropriate distribution strategy: TPU > multi-GPU > default
if tpu:
  tf.config.experimental_connect_to_cluster(tpu)
  tf.tpu.experimental.initialize_tpu_system(tpu)
  strategy = tf.distribute.experimental.TPUStrategy(tpu) # Going back and forth between TPU and host is expensive. Better to run 128 batches on the TPU before reporting back.
  print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
elif len(gpus) > 1:
  strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])
  print('Running on multiple GPUs ', [gpu.name for gpu in gpus])
elif len(gpus) == 1:
  strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
  print('Running on single GPU ', gpus[0].name)
else:
  strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
  print('Running on CPU')
print("Number of accelerators: ", strategy.num_replicas_in_sync)
```
### Parameters
The global batch size is the batch size per replica (64 in this case) times the number of replicas in the distribution strategy.
```
BATCH_SIZE = 64 * strategy.num_replicas_in_sync # Global batch size.
# The global batch size will be automatically sharded across all
# replicas by the tf.data.Dataset API. A single TPU has 8 cores.
# The best practice is to scale the batch size by the number of
# replicas (cores). The learning rate should be increased as well.
```
## Loading and Preprocessing the Dataset
Define some helper functions that will pre-process your data:
- `read_image_tfds`: randomly overlays the "digit" image on top of a larger canvas.
- `get_training_dataset`: loads data and splits it to get the training set.
- `get_validation_dataset`: loads and splits the data to get the validation set.
```
'''
Transforms each image in dataset by pasting it on a 75x75 canvas at random locations.
'''
def read_image_tfds(image, label):
  """Paste a 28x28 MNIST digit at a random spot on a 75x75 black canvas.

  Args:
    image: 28x28 digit image tensor.
    label: integer class label.

  Returns:
    (image, (one_hot_label, bbox)) where image is 75x75x1 float32 in [0, 1]
    and bbox is [xmin, ymin, xmax, ymax] normalized to [0, 1].
  """
  # random top-left corner; maxval 48 is exclusive, so the largest corner is
  # 47 and the 28-pixel digit always fits on the 75-pixel canvas
  xmin = tf.random.uniform((), 0 , 48, dtype=tf.int32)
  ymin = tf.random.uniform((), 0 , 48, dtype=tf.int32)
  image = tf.reshape(image, (28,28,1,))
  image = tf.image.pad_to_bounding_box(image, ymin, xmin, 75, 75)
  image = tf.cast(image, tf.float32)/255.0
  xmin = tf.cast(xmin, tf.float32)
  ymin = tf.cast(ymin, tf.float32)
  # normalize the bounding box coordinates to the 75-pixel canvas
  xmax = (xmin + 28) / 75
  ymax = (ymin + 28) / 75
  xmin = xmin / 75
  ymin = ymin / 75
  return image, (tf.one_hot(label, 10), [xmin, ymin, xmax, ymax])
'''
Loads and maps the training split of the dataset using the map function. Note that we try to load the gcs version since TPU can only work with datasets on Google Cloud Storage.
'''
def get_training_dataset():
  """Load the MNIST train split, apply the canvas transform, shuffle and batch.

  Returns:
    An infinitely repeating tf.data.Dataset of fixed-size batches.
  """
  with strategy.scope():
    dataset = tfds.load("mnist", split="train", as_supervised=True, try_gcs=True)
    dataset = dataset.map(read_image_tfds, num_parallel_calls=16)
    dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
    dataset = dataset.repeat() # Mandatory for Keras for now
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
    dataset = dataset.prefetch(-1) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
    return dataset
'''
Loads and maps the validation split of the dataset using the map function. Note that we try to load the gcs version since TPU can only work with datasets on Google Cloud Storage.
'''
def get_validation_dataset():
  """Load the MNIST test split as one repeating 10000-item batch.

  Returns:
    A tf.data.Dataset that yields the whole eval set as a single batch.
  """
  dataset = tfds.load("mnist", split="test", as_supervised=True, try_gcs=True)
  dataset = dataset.map(read_image_tfds, num_parallel_calls=16)
  #dataset = dataset.cache() # this small dataset can be entirely cached in RAM
  dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
  dataset = dataset.repeat() # Mandatory for Keras for now
  return dataset
# instantiate the datasets
with strategy.scope():
training_dataset = get_training_dataset()
validation_dataset = get_validation_dataset()
```
### Visualize Data
```
(training_digits, training_labels, training_bboxes,
validation_digits, validation_labels, validation_bboxes) = dataset_to_numpy_util(training_dataset, validation_dataset, 10)
display_digits_with_boxes(training_digits, training_labels, training_labels, np.array([]), training_bboxes, np.array([]), "training digits and their labels")
display_digits_with_boxes(validation_digits, validation_labels, validation_labels, np.array([]), validation_bboxes, np.array([]), "validation digits and their labels")
```
## Define the Network
Here, you'll define your custom CNN.
- `feature_extractor`: these convolutional layers extract the features of the image.
- `classifier`: This defines the output layer that predicts among 10 categories (digits 0 through 9)
- `bounding_box_regression`: This defines the output layer that predicts 4 numeric values, which define the coordinates of the bounding box (xmin, ymin, xmax, ymax)
- `final_model`: This combines the layers for feature extraction, classification and bounding box prediction.
- Notice that this is another example of a branching model, because the model splits to produce two kinds of output (a category and set of numbers).
- Since you've learned to use the Functional API earlier in the specialization (course 1), you have the flexibility to define this kind of branching model!
- `define_and_compile_model`: choose the optimizer and metrics, then compile the model.
```
'''
Feature extractor is the CNN that is made up of convolution and pooling layers.
'''
def feature_extractor(inputs):
  """Stacked convolution + average-pooling stages that extract image features."""
  net = tf.keras.layers.Conv2D(16, activation='relu', kernel_size=3,
                               input_shape=(75, 75, 1))(inputs)
  net = tf.keras.layers.AveragePooling2D((2, 2))(net)
  # two more conv/pool stages with doubling filter counts
  for n_filters in (32, 64):
    net = tf.keras.layers.Conv2D(n_filters, kernel_size=3, activation='relu')(net)
    net = tf.keras.layers.AveragePooling2D((2, 2))(net)
  return net
'''
dense_layers adds a flatten and dense layer.
This will follow the feature extraction layers
'''
def dense_layers(inputs):
  """Flatten the feature maps and apply one 128-unit fully-connected layer."""
  flat = tf.keras.layers.Flatten()(inputs)
  return tf.keras.layers.Dense(128, activation='relu')(flat)
'''
Classifier defines the classification output.
This has a set of fully connected layers and a softmax layer.
'''
def classifier(inputs):
  """10-way softmax output head for digit classification."""
  return tf.keras.layers.Dense(10, activation='softmax', name='classification')(inputs)
'''
This function defines the regression output for bounding box prediction.
Note that we have four outputs corresponding to (xmin, ymin, xmax, ymax)
'''
def bounding_box_regression(inputs):
  """Linear 4-unit output head predicting (xmin, ymin, xmax, ymax).

  Fix: `units` was passed as the string '4'; Dense expects a positive
  integer, so the constant is now the integer 4.
  """
  bounding_box_regression_output = tf.keras.layers.Dense(units=4, name='bounding_box')(inputs)
  return bounding_box_regression_output
def final_model(inputs):
  """Assemble the full branching model: shared trunk plus two output heads.

  Args:
    inputs: Keras Input tensor of shape (75, 75, 1).

  Returns:
    tf.keras.Model with outputs [classification_output, bounding_box_output].
  """
  feature_cnn = feature_extractor(inputs)
  dense_output = dense_layers(feature_cnn)
  '''
  The model branches here.
  The dense layer's output gets fed into two branches:
  classification_output and bounding_box_output
  '''
  classification_output = classifier(dense_output)
  bounding_box_output = bounding_box_regression(dense_output)
  model = tf.keras.Model(inputs = inputs, outputs = [classification_output, bounding_box_output])
  return model
def define_and_compile_model(inputs):
  """Build and compile the two-headed model.

  Uses categorical cross-entropy for the classification head and mean
  squared error for the bounding-box regression head (matching loss keys
  to the layer names 'classification' and 'bounding_box').
  """
  model = final_model(inputs)
  model.compile(optimizer='adam',
                loss = {'classification' : 'categorical_crossentropy',
                        'bounding_box' : 'mse'
                       },
                metrics = {'classification' : 'accuracy',
                           'bounding_box' : 'mse'
                          })
  return model
with strategy.scope():
inputs = tf.keras.layers.Input(shape=(75, 75, 1,))
model = define_and_compile_model(inputs)
# print model layers
model.summary()
```
### Train and validate the model
Train the model.
- You can choose the number of epochs depending on the level of performance that you want and the time that you have.
- Each epoch will take just a few seconds if you're using the TPU.
```
EPOCHS = 10 # 45
steps_per_epoch = 60000//BATCH_SIZE  # 60,000 items in this dataset
validation_steps = 1
history = model.fit(training_dataset,
                    steps_per_epoch=steps_per_epoch, validation_data=validation_dataset, validation_steps=validation_steps, epochs=EPOCHS)
# evaluate returns: total loss, each head's loss, then each head's metric
loss, classification_loss, bounding_box_loss, classification_accuracy, bounding_box_mse = model.evaluate(validation_dataset, steps=1)
print("Validation accuracy: ", classification_accuracy)
plot_metrics("classification_loss", "Classification Loss")
plot_metrics("bounding_box_loss", "Bounding Box Loss")
```
## Intersection over union
Calculate the I-O-U metric to evaluate the model's performance.
```
def intersection_over_union(pred_box, true_box):
  """Compute IoU between corresponding rows of two [N, 4] box arrays.

  Boxes are (xmin, ymin, xmax, ymax). Returns an [N, 1] array of IoU values.
  """
  # unpack the four coordinate columns of each array
  px1, py1, px2, py2 = np.split(pred_box, 4, axis=1)
  tx1, ty1, tx2, ty2 = np.split(true_box, 4, axis=1)
  # epsilon keeps the ratio defined when both areas are zero
  eps = 1e-10
  # intersection rectangle, clamped to zero width/height when disjoint
  ix1 = np.maximum(px1, tx1)
  iy1 = np.maximum(py1, ty1)
  ix2 = np.minimum(px2, tx2)
  iy2 = np.minimum(py2, ty2)
  inter = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)
  union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * (ty2 - ty1) - inter
  return (inter + eps) / (union + eps)
```
### Visualize predictions
The following code will make predictions and visualize both the classification and the predicted bounding boxes.
- The true bounding box labels will be in green, and the model's predicted bounding boxes are in red.
- The predicted number is shown below the image.
```
# recognize validation digits
predictions = model.predict(validation_digits, batch_size=64)
predicted_labels = np.argmax(predictions[0], axis=1)
predicted_bboxes = predictions[1]
iou = intersection_over_union(predicted_bboxes, validation_bboxes)
iou_threshold = 0.6
print("Number of predictions where iou > threshold(%s): %s" % (iou_threshold, (iou >= iou_threshold).sum()))
print("Number of predictions where iou < threshold(%s): %s" % (iou_threshold, (iou < iou_threshold).sum()))
display_digits_with_boxes(validation_digits, predicted_labels, validation_labels, predicted_bboxes, validation_bboxes, iou, "True and Predicted values")
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
from rekall import Interval, IntervalSet, IntervalSetMapping, Bounds3D
from rekall.predicates import *
from vgrid import VGridSpec, VideoMetadata, VideoBlockFormat, FlatFormat, SpatialType_Bbox, SpatialType_Keypoints, Metadata_Keypoints
from vgrid_jupyter import VGridWidget
import os, json
import pandas as pd
from const import *
```
### Load in video data.
```
SINGLE_VIDEO = True
test_id = 2
# load in video metadata
video_collection_intel = [
{'num_frames': 3053, 'height': 720, 'width': 406, 'fps': 29.84, 'filename': 'dally_sy.mp4', 'id': 0},
{'num_frames': 1488, 'height': 720, 'width': 1280, 'fps': 30, 'filename': 'hip_emily.mp4', 'id': 1},
{'num_frames': 2062, 'height': 720, 'width': 1280, 'fps': 30, 'filename': '7thsense_mayee.mp4', 'id': 2}
]
if SINGLE_VIDEO:
video_collection_intel = [video_collection_intel[2]]
video_metadata_intel = [
VideoMetadata(v["filename"], v["id"], v["fps"], int(v["num_frames"]), v["width"], v["height"])
for v in video_collection_intel
]
# load in openpose output data
json_dir_sy = "C:/Users/heidi/Documents/seniorproject/openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d/openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d/openpose/long_output/"
json_dir_emily = "C:/Users/heidi/Documents/seniorproject/data/hip_output/"
json_dir_mayee = "C:/Users/heidi/Documents/seniorproject/data/7thsense_output/"
json_dirs = [json_dir_sy, json_dir_emily, json_dir_mayee]
# json_dirs = [json_dir_mayee]
if SINGLE_VIDEO:
json_dirs = [json_dirs[test_id]]
def load_openpose_data(keypoint_files, vm):
  """
  Loads openpose data for single video.
  Arguments:
    keypoint_files: list of keypoint.json file names for one video;
      each json file contains a dictionary of openpose output for one frame.
    vm: VideoMetadata object for one video
  Returns:
    frame_list: list of frames where each frame is a dictionary of part idxs
      to points {0: [x y conf], 1: etc.}
  """
  frames = []
  for path in keypoint_files:
    with open(path) as fh:
      data = json.load(fh)
    people = data['people']
    # fall back to all-zero keypoints for frames with no detected person
    if people and people[0]:
      raw_points = people[0]['pose_keypoints_2d']
    else:
      raw_points = [0] * 75
    parts = {}
    for part_idx in BODY25_MAPPING:
      start = part_idx * POINTS_PER_PART
      point = raw_points[start : start + POINTS_PER_PART]
      if point:
        # normalize x, y by the video dimensions
        point[0] /= vm.width
        point[1] /= vm.height
      parts[part_idx] = point
    frames.append(parts)
  return frames
data_list = []
for path_to_json, vm in zip(json_dirs, video_metadata_intel):
keypoint_files = [os.path.join(path_to_json, pos_json) for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
data_list.append(load_openpose_data(keypoint_files, vm))
#TODO expand for more than one vid
frame_list = data_list[-1]
```
### Annotate videos with open pose data, bounding boxes.
```
def get_op_bbox(frame):
  """
  Arguments:
    frame: dictionary of joint indices to normalized coords [x, y, conf].
      ie {0: [.5, .5, .98]}
  Returns 4 normalized bounding box coordinates x1, x2, y1, y2
  """
  # collect non-zero coordinates; a zero coordinate means the joint
  # was not detected, so it must not shrink or grow the box
  xs = [j[0] for j in frame.values() if len(j) != 0 and j[0] != 0]
  ys = [j[1] for j in frame.values() if len(j) != 0 and j[1] != 0]
  # with no valid coordinates this yields the degenerate box (1, 0, 1, 0),
  # identical to the original running min/max initial values
  x1 = min(xs) if xs else 1
  x2 = max(xs) if xs else 0
  y1 = min(ys) if ys else 1
  y2 = max(ys) if ys else 0
  return x1, x2, y1, y2
# create intervalset mapping w/ pose visualizing data
vm = video_metadata_intel[test_id]
bboxes = [get_op_bbox(frame) for frame in frame_list]
interval_mapping = IntervalSetMapping({
vm.id: IntervalSet([
Interval(
Bounds3D(
t1 = frame_num / vm.fps,
t2 = (frame_num + 1) / vm.fps,
x1 = bboxes[frame_num][0],
x2 = bboxes[frame_num][1],
y1 = bboxes[frame_num][2],
y2 = bboxes[frame_num][3]
),
{'spatial_type': SpatialType_Keypoints(),
'metadata': {
# This function can also parse faces and hands
'pose': Metadata_Keypoints(pose, BODY25_EDGES)
}
}
)
for frame_num, pose in enumerate(frame_list)
])
})
```
### Visualize OP data
```
# visualize
def visualize_helper(video_metadata_intel, interval_mapping):
  """Build a VGrid widget visualizing the interval mapping over the videos."""
  spec = VGridSpec(
    video_meta=video_metadata_intel,
    vis_format=VideoBlockFormat(imaps=[('bboxes', interval_mapping)]),
    video_endpoint='http://localhost:8000',
  )
  return VGridWidget(vgrid_spec=spec.to_json_compressed())
visualize_helper(video_metadata_intel, interval_mapping)
```
### Generate interval mapping for example dance move: hands up
```
def get_coord(interval, joint_idx, coord_idx):
  """Return one coordinate (0=x, 1=y, 2=conf) of one joint from an
  interval's pose metadata."""
  pose_json = interval['payload']['metadata']['pose'].to_json()
  keypoints = pose_json['args']['keypoints']
  return keypoints[joint_idx][coord_idx]
Rwrist = 4
Lwrist = 7
Neck = 15 # this is actually eyes but ... forget it
hands_up = interval_mapping.filter(lambda interval:
get_coord(interval, Rwrist, 1) < get_coord(interval, Neck, 1)
and get_coord(interval, Lwrist, 1) < get_coord(interval, Neck, 1))
visualize_helper(video_metadata_intel, hands_up)
```
### Hand annotate video for this move.
```
#HAND ANNOTATE
vgrid_spec = VGridSpec(
video_meta = video_metadata_intel,
vis_format = VideoBlockFormat(imaps = [
('bboxes', interval_mapping)
]),
video_endpoint = 'http://localhost:8000'
)
widget = VGridWidget(vgrid_spec = vgrid_spec.to_json_compressed())
widget
# widget_output = hands_up[0].get_intervals()[0]['payload']
output_widget = True
widget_file = '7th_sense_mayee_annotated.json'
if output_widget:
with open(widget_file, 'w') as f:
json.dump(widget.label_state, f)
# TIME TO START EVALUATING YO
# with open(widget_file, 'w') as f:
# widget_labels = json.loads(f)
```
## Evaluate results.
```
import math
import numpy as np
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score #inputs: y_truth, y_predict
def get_action_segments(interval_list, vm, segment_length = 1):
  """Discretize a list of time intervals into a binary activity vector.

  Args:
    interval_list: intervals with ['bounds']['t1'] / ['t2'] times in seconds.
    vm: video metadata providing num_frames and fps.
    segment_length: length of each segment in seconds.

  Returns:
    numpy array with one entry per segment; 1 where any interval overlaps.
  """
  num_segments = math.ceil((vm.num_frames / vm.fps) / segment_length)
  action_segments = np.zeros(num_segments)
  for interval in interval_list:
    bounds = interval['bounds']
    t1 = bounds['t1']
    t2 = bounds['t2']
    while (t1 <= t2):
      segment_idx = math.floor(t1 / segment_length)
      # clamp: an interval ending exactly at (or past) the video end would
      # otherwise index one past the last segment and raise IndexError
      if segment_idx < num_segments:
        action_segments[segment_idx] = 1 #mark as 1 for an event!
      t1 += segment_length
  return action_segments
def evaluate(intervals_predict, intervals_truth, vm, segment_length):
  """Print recall/precision/F1/accuracy of predicted vs. ground-truth intervals."""
  y_predict = get_action_segments(intervals_predict, vm, segment_length)
  y_truth = get_action_segments(intervals_truth, vm, segment_length)
  print("Evaluating with segment length = {}s".format(segment_length))
  scorers = (("Recall", recall_score), ("Precision", precision_score),
             ("F1 score", f1_score), ("Accuracy", accuracy_score))
  for label, scorer in scorers:
    print("{}: {}".format(label, scorer(y_truth, y_predict)))
  print()
interval_list = widget.label_state['block_labels']['0']['new_intervals']
vm = video_metadata_intel[-1]
segment_length = .1 # in terms of seconds / times instead of # frames, since time is a constant measure across vids and is more intuitive
rekall_labels = hands_up[test_id].get_intervals()
hand_labels = interval_list
for i in range(1, 20, 2):
segment_length = i / 10.0
evaluate(rekall_labels, hand_labels, vm, segment_length)
```
### Now, try it with Gaussian smoothing
```
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
def evaluate_gaussian(intervals_predict, intervals_truth, vm, segment_length=1, sigma=.5, visualize=True):
  """Score predictions against truth using Gaussian-smoothed activity vectors.

  Both binary segment vectors are smoothed with a Gaussian filter and
  compared via their dot product, giving a continuous overlap score that
  tolerates small temporal misalignments.
  """
  raw_predict = get_action_segments(intervals_predict, vm, segment_length)
  raw_truth = get_action_segments(intervals_truth, vm, segment_length)
  # smooth the hard 0/1 segment marks
  y_predict = gaussian_filter(raw_predict, sigma)
  y_truth = gaussian_filter(raw_truth, sigma)
  if visualize:
    plt.scatter(range(len(raw_predict)), raw_predict, label="original")
    plt.scatter(range(len(y_predict)), y_predict, label="smoothed")
    plt.legend()
    plt.title("Predictions")
    plt.show()
  print("Evaluating with segment length = {}s, sigma = {}s".format(segment_length, sigma))
  relative_score = y_predict.dot(y_truth)
  print("Relative score (cont. dot product): {}".format(relative_score))
  print()
for i in range(1, 20, 2):
segment_length = i / 10.0
evaluate_gaussian(rekall_labels, hand_labels, vm, segment_length, visualize=False)
```
### Experiment with preprocessing / smoothing the prediction data to match human annotation capabilities.
| github_jupyter |
# Pandas Basics: An Introduction
## Reading Data
```
import pandas as pd
# reading data
pd.read_excel("../data/LungCapData.xls")
# store in a variable
df = pd.read_excel("../data/LungCapData.xls")
# show entire dataframe
df
```
## Pandas Display Options and the methods `head()` & `tail()`
```
# show entire dataframe
df
# print dataframe
print(df)
# max rows
pd.options.display.max_rows
# min rows
pd.options.display.min_rows
# examine first few rows(default 5 rows)
df.head()
# examine first few rows(n = 10)
df.head(10)
# examine first few rows(n = 10)
df.head(n = 2)
# examine last few rows(default = 5)
df.tail()
# examine first few rows(n = 10)
df.tail(10)
# examine first few rows(n = 10)
df.tail(n = 10)
```
### Sampling
```
# randomly shows 10 rows
df.sample(10)
```
### First Data Inspection
```
# examine dataframe
df
# information about dataframe
df.info()
# summary statistics of entire dataset
df.describe()
# include categorical data
df.describe(include = "object")
```
## Python Built-in Functions & DataFrame Attributes and Methods
### DataFrames and Python Built-in Functions
```
type(df)
len(df)
round(df, 0)
min(df)
max(df)
```
## DataFrame Attributes
```
df.shape
df.size
df.index
df.columns
```
## DataFrame Methods
```
df.head(n = 2)
df.info()
df.min()
```
## Method Chaining
```
# chaining method
df.mean().sort_values().head(2)
```
## Selecting Columns
```
df.head()
df["Age"]
type(df["Age"])
# select multiple columns
df[["Age", "Gender"]]
# type
type(df[["Age", "Gender"]])
df[["Gender", "Age"]]
df[["Gender", "Age", "LungCap"]]
type(df[["Age"]])
```
## Selecting one Column with "dot notation"
```
df.Age
df['Age']
df.Age.equals(df["Age"])
df.Gender
```
### Position-based Indexing and Slicing with iloc[]
```
import pandas as pd
df = pd.read_csv("../data/covid19.csv", index_col = "Country/Region")
df
df.info()
```
### Selecting Rows with iloc[]
```
df.iloc[0]
type(df.iloc[0])
df.iloc[1]
df.iloc[-1]
df.iloc[[1, 2, 3]]
df.iloc[1:4]
df.iloc[:5]
df.iloc[-5:]
df.iloc[:]
df.iloc[[2, 45, 5467]]
```
### Indexing/Slicing Rows and Columns with iloc[]
```
df.head(10)
df.iloc[0, 4]
df.iloc[0, :3]
df.iloc[0, [0, 2]]
df.iloc[34:39, [0, 2, 5]]
```
### Selecting Columns with iloc[]
```
df.iloc[:, 4].equals(df.Confirmed)
df["Province/State"]
```
### Label-based Indexing and Slicing with loc[]
### Selecting Rows with loc[]
```
df.columns
df.loc["Mainland China"]
len(df.loc['Mainland China'])
```
### Indexing/Slicing Rows and Columns with loc[]
```
df = pd.read_csv('../data/covid19.csv', index_col="Country/Region")
df.head()
df.loc["Mainland China", ["Confirmed", "Deaths"]].head()
df.loc[["Mainland China", "Bangladesh"], ["Deaths", "Recovered"]].head()
df.loc[:, ["Confirmed", "Deaths"]].head()
```
### Indexing and Slicing with reindex()
```
import pandas as pd
df = pd.read_csv("../data/covid19.csv")
df
#summer.loc[[0, 5, 30000, 40000], ["Athlete", "Medal"]]
df.reindex(index = [0, 5, 30000, 40000], columns = ["Confirmed", "Deaths", "Recovered"])
df = pd.read_csv("../data/covid19.csv", index_col = "Country/Region")
df.reindex(columns = ["Confirmed", "Deaths"])
```
## Summary and Outlook
### Importing from CSV and first Inspection
```
import pandas as pd
df = pd.read_csv("../data/covid19.csv", index_col = "Country/Region")
df.head()
df.info()
```
### Selecting one Column
```
df.Confirmed
df["Deaths"]
```
### Selecting multiple Columns
```
df[["Confirmed", "Deaths"]].head()
df.loc[:, ["Confirmed", "Deaths"]].head()
```
### Selecting positional rows
```
df.iloc[10:21]
```
### Selecting labeled rows
```
df.loc["Mainland China"]
```
### Putting it all together
```
df[["Confirmed", "Deaths", "Recovered"]].loc["Mainland China"]
df[["Deaths", "Recovered"]].loc["Bangladesh"]
df[["Confirmed", "Deaths", "Recovered"]].loc[["Mainland China", "Bangladesh"]]
```
### Outlook Pandas Objects
```
df
type(df)
df["Last Update"]
type(df["Last Update"])
df.columns
type(df.columns)
df.index
type(df.index)
```
## Advanced Indexing and Slicing (optional)
```
import pandas as pd
df = pd.read_csv("../data/covid19.csv")
df.head()
```
### Case 1: Getting the first 5 rows and rows 354 and 765
```
rows = list(range(5)) + [354, 765]
rows
df.iloc[rows]
```
### Case 2: Getting the first three columns and the columns "Confirmed" and "Deaths"
```
df.columns[:3].to_list() + ["Confirmed", "Deaths"]
col = df.columns[:3].to_list() + ["Confirmed", "Deaths"]
col
df.loc[:, col]
```
### Case 3: Combining Position- and label-based Indexing: Rows at Positions 200 and 300 and columns "Confirmed" and "Deaths"
```
df
df.loc[[200, 300], ["Confirmed", "Deaths"]]
```
| github_jupyter |
# Lists vs Arrays
### Importing numpy
```
import numpy as np
l = [1,2,3]
print(l)
a = np.array([1,2,3])
print(a)
for num in l:
print(num)
for num in a:
print(num)
l.append(4)
l = l + [5]
print(l)
a.append(4)
a = a + [4,5]
```
### Vector addition list vs numpy array
```
lsum1 = []
for num in l:
lsum1.append(num + num)
print(lsum1)
lsum2 = a + a
print(lsum2)
#Also
a * 2
l * 2
```
### Squaring each element list vs numpy array
```
l ** 2
a ** 2
```
### Elementwise square root
```
np.sqrt(a)
```
### Elementwise log
```
np.log(a)
```
### Elementwise exponent
```
np.exp(a)
```
# Dot product 1: For loop vs. cosine method vs. dot function
```
a = np.array([1,2])
b = np.array([2,1])

# Dot product the manual way: accumulate the sum of elementwise products.
dot = 0
for num1, num2 in zip(a,b):
    dot += num1 * num2
print(dot)

# Equivalent vectorized forms of the same sum.
a * b
np.sum(a * b)
(a * b).sum()

# NumPy's dot function (commutative for 1-D vectors).
np.dot(a, b)
a.dot(b)
b.dot(a)

# Vector magnitude |a| by hand: square root of the sum of squares.
amagnitude = np.sqrt((a * a).sum()) #where (a * a).sum() = 5 and (a * a) = [1, 4]
print(amagnitude)
# Same result via NumPy's linear-algebra helper.
print(np.linalg.norm(a))
# cos(theta) = a.b / (|a||b|)
cosineangle = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(cosineangle)
# Recover the angle itself (radians by default).
angle = np.arccos(cosineangle)
print(angle)
# Sanity check: a.b should equal |a||b|cos(theta).
print(np.linalg.norm(a) * np.linalg.norm(b) * cosineangle)
```
# Dot product 2: Speed comparison
```
# https://github.com/mdalvi/reinforcement-learning-tuts
```
# Vectors and Matrices
```
import numpy as np
#Matrix is largely not but list with list or array within array
M = np.array([[1,2], [3,4]])
L= [[1,2], [3,4]]
L[0]
L[0][0]
M[0][0]
M[0,0]
M2 = np.matrix([[1,2], [3,4]])
M2
A = np.array(M2)
A
#Transpose of matrix
A.T
```
# Generating Matrices to Work With
```
import numpy as np
np.array([1,2,3])

# 1-D vector of zeros.
Z = np.zeros(10)
Z
# 10x10 matrix of zeros — np.zeros takes the shape as a tuple.
Z = np.zeros((10 , 10))
Z
# Ones work the same way (shape given as a tuple).
O = np.ones((10, 10))
O
# Uniform random values in the half-open interval [0, 1).
R = np.random.random((10, 10))
R
# When creating a random matrix we should know which probability
# distribution the numbers came from:
#   np.random.random -> uniform on [0, 1)
#   np.random.randn  -> standard Gaussian (mean 0, stddev 1)
# NOTE: the next line raises TypeError — randn takes each dimension as a
# separate argument, not as a tuple (shown correctly just below).
G = np.random.randn((10,10))
# NOTE the randn takes each size of dimension individually, and not as tuple
G = np.random.randn(10,10)
G
# Sample statistics should be close to mean 0 and variance 1.
G.mean() # gives us the mean
G.var() # gives us the variance
```
# More Matrix Operations
```
A = np.array([[1,2],[3,4]])
A
Ainv = np.linalg.inv(A)
Ainv
Ainv.dot(A) # should give us the identity matrix
A.dot(Ainv) # should give us the identity matrix
# https://www.mathsisfun.com/algebra/matrix-determinant.html
np.linalg.det(A)
np.diag(A)
np.diag([1,2])
# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a,b)
# inner product and dot product are same
np.inner(a,b)
a.dot(b)
# Summing up all values across diagonal
np.diag(A).sum()
np.trace(A)
# Computing eigen values and eigen vectors
# Convention is, each sample will take up a row and each feature a column
# Thus creating an input of 100 samples with 3 features each
X = np.random.randn(100,3)
X
# Let's compute co-variance
cov = np.cov(X)
cov.shape
# Thus whenever calculating co-variance of data matrix, you must transpose it first
cov = np.cov(X.T)
cov
# Let's compute the eigen values and eigen vectors
np.linalg.eigh(cov)
# This also gives same answer as eigh but the result might be in different order
np.linalg.eig(cov)
```
# Solving a Linear System
Form of a linear system is Ax = b
where
A is a matrix,
x is a column vector of values we are going to solve for
b is a vector of numbers.
The solution is multiply by A(inverse) on both sides. We are allowed to do that because we are assuming A is a square matrix meaning if it's invertible then x has a unique solution.
In other words if the dimensionality of x is D then we have D equations and D unknowns.
```
# Let's do it
A = np.array([[1,2],[3,4]])
A
b = np.array([1,2])
b
x = np.linalg.inv(A).dot(b)
x
# or you can use the built in function solve
x = np.linalg.solve(A, b)
x
```
# Word Problem
```
A = np.array([[1,1],[1.5,4]])
b = np.array([2200,5050])
np.linalg.solve(A, b)
```
# Pandas
# Manual Data Loading
```
import numpy as np
X = []
# Use a context manager so the file handle is closed even if parsing fails
# (the original opened the file and never closed it).
with open('data_2d.csv') as f:
    for line in f:
        row = line.split(',')
        sample = map(float, row)
        X.append(list(sample))
X = np.array(X)
print(X)
```
# DataFrames
```
import pandas as pd
X = pd.read_csv('data_2d.csv', header=None)
type(X)
X.info()
X.head()
X.head(10)
```
# More about DataFrames: Selecting Rows and Columns
```
# DataFrames do not support NumPy-style [row, col] subscripting:
# NOTE(review): the next line raises KeyError — use X.iloc[0, 0] for
# positional access; confirm this cell is meant to demonstrate the error.
X[0,0]
# Convert the DataFrame to a plain NumPy array.
# (DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.)
M = X.to_numpy()
type(M)
# On a DataFrame, [] selects by COLUMN label, so X[0] is the column named 0.
X[0]
```
### Numpy: X[0] -> 0th row
### Pandas: X[0] -> column that has name 0
```
type(X[0])
# Thus, pandas DataFrames are 2-d objects,
# while pandas Series are 1-d objects.

# Fetching a row from a pandas DataFrame by position:
print(X.iloc[0])
# .ix was deprecated and removed from pandas; use .loc for label-based access.
# (Here the row labels are the default integers, so .loc[0] matches .iloc[0].)
print(X.loc[0])
type(X.iloc[0])
# Selecting the 0th and 2nd column
print(X[[0,2]])
# Finding/Filtering all rows where the 0th column is less than 5
print(X[X[0] < 5])
print(X[0] < 5)
type(X[0] < 5)
```
# Even more about DataFrames: Column Names
```
# skipfooter is incompatible with default csv read engine i.e. C, hence, engine='python'
df = pd.read_csv('international-airline-passengers.csv', engine='python', skipfooter=3)
print(df.columns)
df.columns = ['month', 'passengers']
print(df.columns)
df['passengers']
df.passengers
type(df.passengers)
df['ones'] = 1
df.head()
```
# The apply() function
```
from datetime import datetime
df['dt'] = df.apply(lambda row: datetime.strptime(row['month'], "%Y-%m"), axis=1)
df.head()
df.info()
```
# join()
```
t1 = pd.read_csv('table1.csv')
t2 = pd.read_csv('table2.csv')
print(t1)
print(t2)
m = pd.merge(t1, t2, on='user_id')
m
t1.merge(t2, on='user_id')
```
# Line Chart
```
import matplotlib.pyplot as plt
import numpy as np
# create a vector as described by start, end, number of points
x = np.linspace(0, 10, 10)
# elementwise sine of matrix x
y = np.sin(x)
plt.plot(x, y)
plt.show()
plt.plot(x,y)
plt.xlabel('Time')
plt.ylabel('Sine function')
plt.title('My first chart')
plt.show()
# To make our sine wave more smooth
x = np.linspace(0,10,1000)
y = np.sin(x)
plt.plot(x,y)
plt.xlabel('Time')
plt.ylabel('Sine function')
plt.title('My first chart')
plt.show()
```
# Scatterplot
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Load the 1-D regression dataset as a plain NumPy array.
# (DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy() instead.)
A = pd.read_csv('data_1d.csv', header=None).to_numpy()
# Let x and y be the 1st and 2nd column of the dataset respectively
x = A[:,0] # all rows, 0th column
y = A[:,1] # all rows, 1st column
plt.scatter(x,y)
plt.show()

# Overlay the (assumed known) best-fit line y = 2x + 1 on the scatter.
x_line = np.linspace(0,100,100)
y_line = 2*x_line + 1 # assuming we happen to know the best-fit function
plt.scatter(x, y)
plt.plot(x_line, y_line)
plt.show()
```
# Histogram
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Load the dataset as a NumPy array.
# (DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy() instead.)
A = pd.read_csv('data_1d.csv', header=None).to_numpy()
x = A[:,0]
y = A[:,1]
plt.hist(x)
plt.show()

# The uniform distribution: np.random.random draws values in [0, 1).
R = np.random.random(100000)
plt.hist(R)
plt.show()
plt.hist(R, bins=20)
plt.show()

# Plot the residual error against the (assumed known) linear model y = 2x + 1.
y_actual = 2*x + 1
residuals = y - y_actual
plt.hist(residuals)
plt.show()
```
# Plotting Images
```
# The files required to be downloaded are located at
# https://www.kaggle.com/c/digit-recognizer/data
# Placed in the /large_files folder, ignored by git
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../large_files/train.csv')
df.shape
# Convert the DataFrame to a NumPy array.
# (DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy() instead.)
M = df.to_numpy()
# If we open train.csv, we find that the first column is the target label.
# Grab the first image: first row, every column except the first.
im = M[0, 1:]
im.shape
# Reshape the flat pixel vector into a 28x28 image.
im = im.reshape(28, 28)
im.shape
plt.imshow(im)
plt.show()
# Checking the target label
M[0,0]
plt.imshow(im ,cmap='gray') #setting colormap to gray
plt.show()
# Inverting colors (pixel values run 0-255)
plt.imshow(255 - im, cmap='gray')
plt.show()
```
# Gaussian PDF and CDF
### PDF - Probability density function
### CDF - Cumulative distribution function
The question:
Given a sample of a random variable, what is its probability density?
```
from scipy.stats import norm
# Finding the probability density of value 0 from the standard distribution
print(norm.pdf(0))
# For gaussian with mean other 0 and std-dev other than 1
# Mean = 5, Standard deviation = 100
print(norm.pdf(0, loc=5, scale=10))
# Calculating pdf values of all elements in array
import numpy as np
r = np.random.randn(10)
r.shape
norm.pdf(r)
# When calculating the joint probability of sample data we need to multiply their
# probabilities together
# When calculating the log of joint probability, we add the individual probabilities
# Since adding is faster than multiplication, log pdf is preferred
norm.logpdf(r)
# CDF is an integral of PDF from -(infinity) to x
# Since this integral is not solvable mathematically, scipy gives a function to solve it numerically
norm.cdf(r)
norm.logcdf(r)
```
# Sampling from a Gaussian Distribution (1-D)
```
import numpy as np
import matplotlib.pyplot as plt
r = np.random.randn(10000)
plt.hist(r, bins = 100)
plt.show()
# Let's start sampling
# Multiple by the new standard deviation and then add the new mean
r = 10 * np.random.randn(10000) + 5 # here the mean is 5
# This gives us the guassian distribution of std dev 10 and mean 5
# By default the np.random.randn(10000) gives GD of stdev 1 and mean 0
plt.hist(r, bins = 100)
plt.show()
```
# Sampling from a Gaussian Distribution (Spherical and Axis-aligned Elliptical)
```
import numpy as np
import matplotlib.pyplot as plt
#Spherical guassian
r = np.random.randn(10000, 2)
# x = all rows of first column, y = all rows of second column
plt.scatter(r[:, 0], r[:, 1])
plt.axis('equal')
plt.show()
# Elliptical guassian
# Extending the stdev and mean of one of the dimensions
r[:, 1] = 5 * r[:, 1] + 2
plt.scatter(r[:, 0], r[:, 1])
plt.axis('equal')
plt.show()
```
# Sampling from a General Multivariate Normal
```
# General multivariate normal, where the datapoints in the various dimension
# are not independent, rather dependent on each other thus obtaining a full co-variance matrix.
cov = np.array([[1, 0.8], [0.8, 3]])
# This means we have variance of 1 in first dimension and variance of 3 in second dimension
# and the covariance between the two dimensions is 0.8
from scipy.stats import multivariate_normal as mvn
mu = np.array([0,2])
r = mvn.rvs(mean = mu, cov = cov, size = 1000)
plt.scatter(r[:, 0], r[:, 1])
plt.axis('equal')
plt.show()
r.shape
# This can be also achived as
r = np.random.multivariate_normal(mean = mu, cov = cov, size = 1000)
plt.scatter(r[:, 0], r[:, 1])
plt.axis('equal')
plt.show()
```
# Other interesting Scipy Functions
* scipy.io.loadmat - used to open .mat files
* scipy.io.wavfile.read - to read wavefiles
* scipy.io.wavfile.write - to write wavefiles
* scipy.signal module - for convolution
```
# Fast foreier transform example (exists in numpy)
# converts the signal from time domain to frequency domain
x = np.linspace(0, 10, 100000)
y = np.sin(x) + np.sin(3*x) + np.sin(5*x)
plt.plot(y)
plt.show()
Y = np.fft.fft(y) # gives signals with complex numbers
plt.plot(np.abs(Y))
plt.show()
Y = np.fft.fft(y) # gives signals with complex numbers
plt.plot(np.abs(Y))
plt.axis('equal')
plt.show()
# Decoding the original frequencies
2*np.pi*16/100
2*np.pi*48/100
2*np.pi*80/100
```
| github_jupyter |
# Bike Share System
*Modeling and Simulation in Python*
Copyright 2021 Allen Downey
License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)
```
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import basename, exists
def download(url):
    """Fetch *url* into the working directory unless a file with the
    same basename already exists; print a message when downloaded."""
    target = basename(url)
    if exists(target):
        return
    from urllib.request import urlretrieve
    saved, _ = urlretrieve(url, target)
    print('Downloaded ' + saved)
download('https://raw.githubusercontent.com/AllenDowney/' +
'ModSimPy/master/modsim.py')
# import functions from modsim
from modsim import *
```
This chapter presents a simple model of a bike share system and
demonstrates the features of Python we'll use to develop simulations of real-world systems.
Along the way, we'll make decisions about how to model the system. In
the next chapter we'll review these decisions and gradually improve the model.
This chapter is available as a Jupyter notebook where you can read the text, run the code, and work on the exercises.
Click here to access the notebooks: <https://allendowney.github.io/ModSimPy/>.
## Modeling a Bike Share System
Imagine a bike share system for students traveling between Olin College and Wellesley College, which are about three miles apart in eastern Massachusetts.
Suppose the system contains 12 bikes and two bike racks, one at Olin and one at Wellesley, each with the capacity to hold 12 bikes.
As students arrive, check out a bike, and ride to the other campus, the number of bikes in each location changes. In the simulation, we'll need to keep track of where the bikes are. To do that, we'll use a function called `State`, which is defined in the ModSim library.
```
bikeshare = State(olin=10, wellesley=2)
```
The expressions in parentheses are *keyword arguments*.
They create two variables, `olin` and `wellesley`, and give them values.
Then we call the `State` function.
The result is a `State` object, which is a collection of *state variables*.
In this example, the state variables represent the number of
bikes at each location. The initial values are `10` and `2`, indicating that there are 10 bikes at Olin and 2 at Wellesley.
The `State` object is assigned to a new variable named `bikeshare`.
We can read the variables inside a `State` object using the *dot operator*, like this:
```
bikeshare.olin
```
And this:
```
bikeshare.wellesley
```
Or, to display the state variables and their values, you can just enter the name of the object:
```
bikeshare
```
These values make up the *state* of the system.
The ModSim library provides a function called `show` that displays a `State` object as a table.
```
show(bikeshare)
```
You don't have to use `show`, but I think the results look better.
We can update the state by assigning new values to the variables.
For example, if a student moves a bike from Olin to Wellesley, we can figure out the new values and assign them:
```
bikeshare.olin = 9
bikeshare.wellesley = 3
```
Or we can use *update operators*, `-=` and `+=`, to subtract 1 from
`olin` and add 1 to `wellesley`:
```
bikeshare.olin -= 1
bikeshare.wellesley += 1
```
The result is the same either way.
## Defining Functions
So far we have used functions defined in NumPy and the ModSim library. Now we're going to define our own functions.
When you are developing code in Jupyter, it is often efficient to write a few lines of code, test them to confirm they do what you intend, and then use them to define a new function. For example, these lines move a bike from Olin to Wellesley:
```
bikeshare.olin -= 1
bikeshare.wellesley += 1
```
Rather than repeat them every time a bike moves, we can define a new
function:
```
def bike_to_wellesley():
    """Move one bike from the Olin rack to the Wellesley rack."""
    bikeshare.wellesley += 1
    bikeshare.olin -= 1
```
`def` is a special word in Python that indicates we are defining a new
function. The name of the function is `bike_to_wellesley`. The empty
parentheses indicate that this function requires no additional
information when it runs. The colon indicates the beginning of an
indented *code block*.
The next two lines are the *body* of the function. They have to be
indented; by convention, the indentation is four spaces.
When you define a function, it has no immediate effect. The body of the
function doesn't run until you *call* the function. Here's how to call
this function:
```
bike_to_wellesley()
```
When you call the function, it runs the statements in the body, which
update the variables of the `bikeshare` object; you can check by
displaying the new state.
```
show(bikeshare)
```
When you call a function, you have to include the parentheses. If you
leave them out, you get this:
```
bike_to_wellesley
```
This result indicates that `bike_to_wellesley` is a function. You don't have to know what `__main__` means, but if you see something like this, it probably means that you named a function but didn't actually call it.
So don't forget the parentheses.
## Print Statements
As you write more complicated programs, it is easy to lose track of what
is going on. One of the most useful tools for debugging is the *print statement*, which displays text in the Jupyter notebook.
Normally when Jupyter runs the code in a cell, it displays the value of
the last line of code. For example, if you run:
```
bikeshare.olin
bikeshare.wellesley
```
Jupyter runs both lines, but it only displays the value of the
second. If you want to display more than one value, you can use
print statements:
```
print(bikeshare.olin)
print(bikeshare.wellesley)
```
When you call the `print` function, you can put a variable in
parentheses, as in the previous example, or you can provide a sequence
of variables separated by commas, like this:
```
print(bikeshare.olin, bikeshare.wellesley)
```
Python looks up the values of the variables and displays them; in this
example, it displays two values on the same line, with a space between
them.
Print statements are useful for debugging functions. For example, we can
add a print statement to `bike_to_wellesley`, like this:
```
def bike_to_wellesley():
    """Move one bike Olin -> Wellesley, logging the move for debugging."""
    print('Moving a bike to Wellesley')
    bikeshare.wellesley += 1
    bikeshare.olin -= 1
```
Each time we call this version of the function, it displays a message,
which can help us keep track of what the program is doing.
The message in this example is a *string*, which is a sequence of
letters and other symbols in quotes.
Just like `bike_to_wellesley`, we can define a function that moves a
bike from Wellesley to Olin:
```
def bike_to_olin():
    """Move one bike Wellesley -> Olin, logging the move for debugging."""
    print('Moving a bike to Olin')
    bikeshare.olin += 1
    bikeshare.wellesley -= 1
```
And call it like this:
```
bike_to_olin()
```
One benefit of defining functions is that you avoid repeating chunks of
code, which makes programs smaller. Another benefit is that the name you
give the function documents what it does, which makes programs more
readable.
## If Statements
At this point we have functions that simulate moving bikes; now let's think about simulating customers. As a simple model of customer behavior, I will use a random number generator to determine when customers arrive at each station.
The ModSim library provides a function called `flip` that generates random "coin tosses".
When you call it, you provide a probability between 0 and 1, like this:
```
flip(0.7)
```
The result is one of two values: `True` with probability 0.7 (in this example) or `False`
with probability 0.3. If you run `flip` like this 100 times, you should
get `True` about 70 times and `False` about 30 times. But the results
are random, so they might differ from these expectations.
`True` and `False` are special values defined by Python.
They are called *boolean* values because they are
related to Boolean algebra (<https://modsimpy.com/boolean>).
Note that they are not strings. There is a difference between `True`, which is a boolean value, and `'True'`, which is a string.
We can use boolean values to control the behavior of the program, using an *if statement*:
```
if flip(0.5):
print('heads')
```
If the result from `flip` is `True`, the program displays the string
`'heads'`. Otherwise it does nothing.
The syntax for `if` statements is similar to the syntax for
function definitions: the first line has to end with a colon, and the
lines inside the `if` statement have to be indented.
Optionally, you can add an *else clause* to indicate what should
happen if the result is `False`:
```
if flip(0.5):
print('heads')
else:
print('tails')
```
If you run the previous cell a few times, it should print `heads` about half the time, and `tails` about half the time.
Now we can use `flip` to simulate the arrival of customers who want to
borrow a bike. Suppose students arrive at the Olin station every two
minutes on average.
In that case, the chance of an arrival during any one-minute period is 50%, and we can simulate it like this:
```
if flip(0.5):
bike_to_wellesley()
```
If students arrive at the Wellesley station every three minutes, on average,
the chance of an arrival during any one-minute period is 33%, and we can
simulate it like this:
```
if flip(0.33):
bike_to_olin()
```
We can combine these snippets into a function that simulates a *time step*, which is an interval of time, in this case one minute:
```
def step():
    """Simulate one time step (one minute) of the bike share system.

    With probability 0.5 a rider takes a bike from Olin to Wellesley,
    and with probability 0.33 a rider takes one from Wellesley to Olin.
    The two events are independent: zero, one, or both moves may happen.
    """
    if flip(0.5):
        bike_to_wellesley()
    if flip(0.33):
        bike_to_olin()
```
Then we can simulate a time step like this:
```
step()
```
Depending on the results from `flip`, this function might move a bike to Olin, or to Wellesley, or neither, or both.
## Parameters
The previous version of `step` is fine if the arrival probabilities
never change, but in reality they vary over time.
So instead of putting the constant values 0.5 and 0.33 in `step`, we can replace them with *parameters*.
Parameters are variables whose values are set when a function is called.
Here's a version of `step` that takes two parameters, `p1` and `p2`:
```
def step(p1, p2):
    """Simulate one time step with configurable arrival probabilities.

    p1: probability a customer arrives at Olin (bike moves to Wellesley).
    p2: probability a customer arrives at Wellesley (bike moves to Olin).
    """
    if flip(p1):
        bike_to_wellesley()
    if flip(p2):
        bike_to_olin()
```
The values of `p1` and `p2` are not set inside this function; instead,
they are provided when the function is called, like this:
```
step(0.5, 0.33)
```
The values you provide when you call the function are called
*arguments*. The arguments, `0.5` and `0.33` in this example, get
assigned to the parameters, `p1` and `p2`, in order. So running this
function has the same effect as:
```
p1 = 0.5
p2 = 0.33
if flip(p1):
bike_to_wellesley()
if flip(p2):
bike_to_olin()
```
The advantage of using parameters is that you can call the same function many times, providing different arguments each time.
Adding parameters to a function is called *generalization*, because it makes the function more general; without parameters, the function always does the same thing; with parameters, it can do a range of things.
## For Loops
At some point you will get sick of running cells over and over.
Fortunately, there is an easy way to repeat a chunk of code, the *for loop*. Here's an example:
```
for i in range(3):
print(i)
bike_to_wellesley()
```
The syntax here should look familiar; the first line ends with a
colon, and the lines inside the `for` loop are indented. The other
elements of the loop are:
- The words `for` and `in` are special words we have to use in a for
loop.
- `range` is a Python function we use to control the number of times the loop runs.
- `i` is a *loop variable* that gets created when the for loop runs.
When this loop runs, it runs the statements inside the loop three times. The first time, the value of `i` is `0`; the second time, it is `1`; the third time, it is `2`.
Each time through the loop, it prints the value of `i` and moves one bike to Wellesley.
## Timeseries
When we run a simulation, we often want to save the results for later analysis. The ModSim library provides a `TimeSeries` object for this purpose. A `TimeSeries` contains a sequence of time stamps and a
corresponding sequence of quantities.
In this example, the time stamps are integers representing minutes and the quantities are the number of bikes at one location.
Since we have moved a number of bikes around, let's start again with a new `State` object.
```
bikeshare = State(olin=10, wellesley=2)
```
We can create a new, empty `TimeSeries` like this:
```
results = TimeSeries()
```
And we can add a quantity like this:
```
results[0] = bikeshare.olin
```
The number in brackets is the time stamp, also called a *label*.
We can use a `TimeSeries` inside a for loop to store the results of the simulation:
```
for i in range(3):
print(i)
step(0.6, 0.6)
results[i+1] = bikeshare.olin
```
Each time through the loop, we print the value of `i` and call `step`, which updates `bikeshare`.
Then we store the number of bikes at Olin in `results`.
We use the loop variable, `i`, to compute the time stamp, `i+1`.
The first time through the loop, the value of `i` is `0`, so the time stamp is `1`.
The last time, the value of `i` is `2`, so the time stamp is `3`.
When the loop exits, `results` contains 4 time stamps, from 0 through
3, and the number of bikes at Olin at the end of each time step.
We can display the `TimeSeries` like this:
```
show(results)
```
The left column is the time stamps; the right column is the quantities.
## Plotting
`results` provides a function called `plot` we can use to plot
the results, and the ModSim library provides `decorate`, which we can use to label the axes and give the figure a title:
```
results.plot()
decorate(title='Olin-Wellesley Bikeshare',
xlabel='Time step (min)',
ylabel='Number of bikes')
```
The result should be a plot with time on the $x$-axis and the number of bikes on the $y$-axis.
Since we only ran three time steps, it might not be very interesting.
## Summary
This chapter introduces the tools we need to run simulations, record the results, and plot them.
We used a `State` object to represent the state of the system.
Then we used the `flip` function and an `if` statement to simulate a single time step.
We used a `for` loop to simulate a series of steps, and a `TimeSeries` to record the results.
Finally, we used `plot` and `decorate` to plot the results.
In the next chapter, we will extend this simulation to make it a little more realistic.
## Exercises
Before you go on, you might want to work on the following exercises.
### Exercise 1
What happens if you spell the name of a state variable wrong? Edit the following cell, change the spelling of `wellesley`, and run it.
The error message uses the word *attribute*, which is another name for what we are calling a state variable.
```
bikeshare = State(olin=10, wellesley=2)
bikeshare.wellesley
```
### Exercise 2
Make a `State` object with a third state variable, called `downtown`, with initial value 0, and display the state of the system.
```
# Solution goes here
```
### Exercise 3
Wrap the code in the chapter in a function named `run_simulation` that takes three parameters, named `p1`, `p2`, and `num_steps`.
It should:
1. Create a `TimeSeries` object to hold the results.
2. Use a for loop to run `step` the number of times specified by `num_steps`, passing along the specified values of `p1` and `p2`.
3. After each step, it should save the number of bikes at Olin in the `TimeSeries`.
4. After the for loop, it should plot the results and
5. Decorate the axes.
To test your function:
1. Create a `State` object with the initial state of the system.
2. Call `run_simulation` with parameters `p1=0.3`, `p2=0.2`, and `num_steps=60`.
```
# Solution goes here
# Solution goes here
```
## Under the Hood
This section contains additional information about the functions we've used and pointers to their documentation.
You don't need to know anything in this section, so if you are already feeling overwhelmed, you might want to skip it.
But if you are curious, read on.
`State` and `TimeSeries` objects are based on the `Series` object defined by the Pandas library.
The documentation is at <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html>.
`Series` objects provide their own `plot` function, which is why we call it like this:
```
results.plot()
```
Instead of like this:
```
plot(results)
```
You can read the documentation of `Series.plot` at <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.plot.html>.
`decorate` is based on Matplotlib, which is a widely-used plotting library for Python. Matplotlib provides separate functions for `title`, `xlabel`, and `ylabel`.
`decorate` makes them a little easier to use.
For the list of keyword arguments you can pass to `decorate`, see <https://matplotlib.org/3.2.2/api/axes_api.html?highlight=axes#module-matplotlib.axes>.
The `flip` function uses NumPy's `random` function to generate a random number between 0 and 1, then returns `True` or `False` with the given probability.
You can get the source code for `flip` (or any other function) by running the following cell.
```
source_code(flip)
```
| github_jupyter |
# Content with notebooks
You can also create content with Jupyter Notebooks. The content for the current page is contained
in a Jupyter Notebook in the `notebooks/` folder of the repository. This means that we can include
code blocks and their outputs, and export them to Jekyll markdown.
**You can find the original notebook for this page [at this address](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb)**
## Markdown + notebooks
As it is markdown, you can embed images, HTML, etc into your posts!

You an also $add_{math}$ and
$$
math^{blocks}
$$
or
$$
\begin{align*}
\mbox{mean} la_{tex} \\ \\
math blocks
\end{align*}
$$
But make sure you \$Escape \$your \$dollar signs \$you want to keep!
## Code blocks and image outputs
Textbooks with Jupyter will also embed your code blocks and output in your site.
For example, here's some sample Matplotlib code:
```
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
data = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
Line2D([0], [0], color=cmap(.5), lw=4),
Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot']);
```
Note that the image above is captured and displayed by Jekyll.
## Removing content before publishing
You can also remove some content before publishing your book to the web. For example,
in [the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb) there
used to be a cell below...
```
thisvariable = "none of this should show up in the textbook"
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
```
You can also **remove only the code** so that images and other output still show up.
Below we'll *only* display an image. It was generated with Python code in a cell,
which you can [see in the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb)
```
# NO CODE
thisvariable = "this plot *will* show up in the textbook."
fig, ax = plt.subplots()
x = np.random.randn(100)
y = np.random.randn(100)
ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm)
ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes)
ax.set_axis_off()
```
And here we'll *only* display a Pandas DataFrame. Again, this was generated with Python code
from [this original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb).
```
# NO CODE
import pandas as pd
pd.DataFrame([['hi', 'there'], ['this', 'is'], ['a', 'DataFrame']], columns=['Word A', 'Word B'])
```
You can configure the text that *Textbooks with Jupyter* uses for this by modifying your book's `_config.yml` file.
## Interactive outputs
We can even do the same for *interactive* material. Below we'll display a map using `ipyleaflet`. When the notebook
is converted to Markdown, the code for creating the interactive map is retained.
**Note that this will only work for some packages.** They need to be able to output standalone HTML/Javascript, and not
depend on an underlying Python kernel to work.
```
import folium
m = folium.Map(
location=[45.372, -121.6972],
zoom_start=12,
tiles='Stamen Terrain'
)
folium.Marker(
location=[45.3288, -121.6625],
popup='Mt. Hood Meadows',
icon=folium.Icon(icon='cloud')
).add_to(m)
folium.Marker(
location=[45.3311, -121.7113],
popup='Timberline Lodge',
icon=folium.Icon(color='green')
).add_to(m)
folium.Marker(
location=[45.3300, -121.6823],
popup='Some Other Location',
icon=folium.Icon(color='red', icon='info-sign')
).add_to(m)
m
```
| github_jupyter |
```
import numpy as np
import sys
sys.path.append(
'/global/u1/s/sfschen/Python/velocileptors/')
from matplotlib import pyplot as plt
from compute_xiell_tables import compute_xiell_tables, compute_xiells_fixedbias
rr = np.arange(50, 160, 0.1)
# Remake the data grid:
order = 4
Npoints = 2*order + 1
# these are OmegaM, h, sigma8
x0s = [0.31, 0.68, 0.73]; Nparams = len(x0s) # these are chosen to be roughly at the BOSS best fit value
dxs = [0.01, 0.01, 0.05]
output_shape = (len(rr),6)
center_ii = (order,)*Nparams
X0grid = np.zeros( (Npoints,)*Nparams+ output_shape)
X2grid = np.zeros( (Npoints,)*Nparams+ output_shape)
# Load data
for ii in range(Npoints):
for jj in range(Npoints):
for kk in range(Npoints):
#print(ii,jj,kk)
X0grid[ii,jj,kk] = np.loadtxt('data/boss_z_0.61/boss_xi0_%d_%d_%d.txt'%(ii,jj,kk))
X2grid[ii,jj,kk] = np.loadtxt('data/boss_z_0.61/boss_xi2_%d_%d_%d.txt'%(ii,jj,kk))
# Now compute the derivatives
from taylor_approximation import compute_derivatives
derivs0 = compute_derivatives(X0grid, dxs, center_ii, 4)
derivs2 = compute_derivatives(X2grid, dxs, center_ii, 4)
from taylor_approximation import taylor_approximate
import time
test_point = [0.32, 0.64, 0.6]
t1 =time.time()
xi0test = taylor_approximate(test_point, x0s, derivs0, order=3)
xi2test = taylor_approximate(test_point, x0s, derivs2, order=3)
t2 =time.time()
print(t2-t1)
xi0, xi2 = compute_xiell_tables(test_point)
ii = 0
plt.plot(rr, rr * xi0test[:,ii], label=r'$\ell=0$')
plt.plot(rr, rr * xi0[:,ii], 'k--')
plt.plot(rr, rr * xi2test[:,ii], label=r'$\ell=2$')
plt.plot(rr, rr * xi2[:,ii], 'k--', label='true')
plt.xlabel('r [Mpc/h]')
plt.ylabel(r'$r^2 \xi_\ell$')
plt.legend()
# Let's test something...
from scipy.interpolate import interp1d
test_point = [0.32, 0.64, 0.6]
xi0test, xi2test =compute_xiells_fixedbias(test_point, 2,1, dr=1.0)
xi0, xi2 = compute_xiells_fixedbias(test_point, 2,1, dr=0.1)
rr1 = np.arange(50,160,0.1)
rr2 = np.arange(50,160,1.0)
xi0test = interp1d(rr2,xi0test,kind='cubic',fill_value='extrapolate')(rr1)
xi2test = interp1d(rr2,xi2test,kind='cubic',fill_value='extrapolate')(rr1)
plt.plot(rr1, rr1**2 * xi2)
plt.plot(rr1, rr1**2 * xi2test, 'k--')
plt.plot(rr1, xi2test/xi2 - 1)
plt.ylim(-0.01,0.01)
p0planck, p2planck = compute_pell_tables(x0s)
plt.plot(kvec, kvec * p0test[:,0], label=r'$\ell=0$')
plt.plot(kvec, kvec * p0[:,0], 'k--')
plt.plot(kvec, kvec * p0planck[:,0], 'k:')
plt.plot(kvec, kvec * p2test[:,0], label=r'$\ell=2$')
plt.plot(kvec, kvec * p2[:,0], 'k--', label='true')
plt.plot(kvec, kvec * p2planck[:,0], 'k:',label='Planck')
plt.xlabel('k [h/Mpc]')
plt.ylabel(r'$P^\ell_{11}$')
plt.legend()
plt.xlim(0,0.5)
plt.ylim(-100,600)
rr.shape
# Come up with a file format:
import json
list0 = [ dd.tolist() for dd in derivs0 ]
list2 = [ dd.tolist() for dd in derivs2 ]
np.array(list0[3]).shape
outfile = 'emu/pkells_planck.json'
outdict = {'params': ['omegam', 'h', 'sigma8'],\
'x0': x0s,\
'derivs0': list0,\
'derivs2': list2}
json_file = open(outfile, 'w')
json.dump(outdict, json_file)
json_file.close()
# Let's reproduce things by reloading the saved file
json_file = open(outfile, 'r')
emu = json.load( json_file )
json_file.close()
test_point = [0.28, 0.70, 0.6]
#x0s = emu['x0']
#derivs0 = [np.array(ll) for ll in emu['derivs0']]
#derivs2 = [np.array(ll) for ll in emu['derivs2']]
p0test = taylor_approximate(test_point, x0s, derivs0)
p2test = taylor_approximate(test_point, x0s, derivs2)
p0, p2 = compute_pell_tables(test_point)
ii = 1
plt.plot(kvec, kvec * p0test[:,ii], label=r'$\ell=0$')
plt.plot(kvec, kvec * p0[:,ii], 'k--')
plt.plot(kvec, kvec * p2test[:,ii], label=r'$\ell=2$')
plt.plot(kvec, kvec * p2[:,ii], 'k--', label='true')
plt.xlabel('k [h/Mpc]')
plt.ylabel(r'$P^\ell_{11}$')
plt.legend()
plt.plot(kvec, p0test[:,ii]/p0[:,ii]-1)
plt.plot(kvec, p2test[:,ii]/p2[:,ii]-1)
plt.ylim(-0.02,0.02)
plt.xlabel('k')
plt.ylabel(r'$\Delta P_\ell /P_\ell$')
```
| github_jupyter |
# Hello, pytorch

__This notebook__ will teach you to use pytorch low-level core. You can install it [here](http://pytorch.org/). For high-level interface see the next notebook.
__Pytorch feels__ different from tensorflow/theano at almost every level. TensorFlow makes your code live in two "worlds" simultaneously: symbolic graphs and actual tensors. First you declare a symbolic "recipe" of how to get from inputs to outputs, then feed it with actual minibatches of data. In pytorch, __there's only one world__: all tensors have a numeric value.
You compute outputs on the fly without pre-declaring anything. The code looks exactly as in pure numpy with one exception: pytorch computes gradients for you. And can run stuff on GPU. And has a number of pre-implemented building blocks for your neural nets. [And a few more things.](https://medium.com/towards-data-science/pytorch-vs-tensorflow-spotting-the-difference-25c75777377b)
And now we finally shut up and let pytorch do the talking.
```
from __future__ import print_function
import numpy as np
import torch
print(torch.__version__)
# numpy world
x = np.arange(16).reshape(4,4)
print("X :\n%s\n" % x)
print("X.shape : %s\n" % (x.shape,))
print("add 5 :\n%s\n" % (x + 5))
print("X*X^T :\n%s\n" % np.dot(x,x.T))
print("mean over cols :\n%s\n" % (x.mean(axis=-1)))
print("cumsum of cols :\n%s\n" % (np.cumsum(x,axis=0)))
# pytorch world
x = np.arange(16).reshape(4,4)
x = torch.from_numpy(x).type(torch.FloatTensor) #or torch.arange(0,16).view(4,4)
print ("X :\n%s" % x)
print("X.shape : %s\n" % (x.shape,))
print ("add 5 :\n%s" % (x+5))
print ("X*X^T :\n%s" % torch.matmul(x,x.transpose(1,0)))
print ("mean over cols :\n%s" % torch.mean(x,dim=-1))
print ("cumsum of cols :\n%s" % torch.cumsum(x,dim=0))
```
## NumPy and Pytorch
As you can notice, pytorch allows you to hack stuff much the same way you did with numpy. No graph declaration, no placeholders, no sessions. This means that you can _see the numeric value of any tensor at any moment of time_. Debugging such code can be done with by printing tensors or using any debug tool you want (e.g. [gdb](https://wiki.python.org/moin/DebuggingWithGdb)).
You could also notice a few new method names and a different API. So no, there's no compatibility with numpy [yet](https://github.com/pytorch/pytorch/issues/2228) and yes, you'll have to memorize all the names again. Get excited!

For example,
* If something takes a list/tuple of axes in numpy, you can expect it to take *args in pytorch
* `x.reshape([1,2,8]) -> x.view(1,2,8)`
* You should swap _axis_ for _dim_ in operations like mean or cumsum
* `x.sum(axis=-1) -> x.sum(dim=-1)`
* most mathematical operations are the same, but types and shaping are different
* `x.astype('int64') -> x.type(torch.LongTensor)`
To help you acclimatize, there's a [table](https://github.com/torch/torch7/wiki/Torch-for-Numpy-users) covering most new things. There's also a neat [documentation page](http://pytorch.org/docs/master/).
Finally, if you're stuck with a technical problem, we recommend searching the [pytorch forums](https://discuss.pytorch.org/). Or just googling, which usually works just as efficiently.
If you feel like you're about to give up, remember two things: __GPU__ and __free gradients__. Besides, you can always jump back to numpy with x.numpy()
### Warmup: trigonometric knotwork
_inspired by [this post](https://www.quora.com/What-are-the-most-interesting-equation-plots)_
There are some simple mathematical functions with cool plots. For one, consider this:
$$ x(t) = t - 1.5 * cos( 15 t) $$
$$ y(t) = t - 1.5 * sin( 16 t) $$
```
import matplotlib.pyplot as plt
%matplotlib inline
t = torch.linspace(-10, 10, steps = 10000)
# compute x(t) and y(t) as defined above
x = ###YOUR CODE
y = ###YOUR CODE
plt.plot(x.numpy(), y.numpy())
```
if you're done early, try adjusting the formula and seeing how it affects the function
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
## Automatic gradients
Any self-respecting DL framework must do your backprop for you. Torch handles this with __`Variable`__s and the `autograd` module.
The general pipeline looks like this:
* You create ```a = Variable(data, requires_grad=True)```
* You define some differentiable `loss = whatever(a)`
* Call `loss.backward()`
* Gradients are now available as ```a.grad```
__Here's an example:__ let's fit a linear regression on Boston house prices
```
# Fit a one-feature linear regression (price vs. last column, LSTAT) on the
# Boston housing data to demonstrate torch autograd.
from sklearn.datasets import load_boston
boston = load_boston()
plt.scatter(boston.data[:, -1], boston.target)
from torch.autograd import Variable
# Trainable scalar parameters: slope w and intercept b.
# requires_grad=True tells autograd to track gradients for them.
w = Variable(torch.zeros(1), requires_grad=True)
b = Variable(torch.zeros(1), requires_grad=True)
# cast data into torch variables; the input feature is divided by 10,
# presumably to keep gradients well-scaled -- TODO confirm
x = Variable(torch.FloatTensor(boston.data[:,-1] / 10))
y = Variable(torch.FloatTensor(boston.target))
# forward pass: linear prediction and mean-squared-error loss
y_pred = w * x + b
loss = torch.mean( (y_pred - y)**2 )
# propagate gradients: fills w.grad and b.grad
loss.backward()
```
The gradients are now stored in `.grad` of a variable.
```
print("dL/dw = \n", w.grad)
print("dL/db = \n", b.grad)
```
If you compute gradients from multiple losses, the gradients will add up at variables, therefore it's useful to __zero the gradients__ between iterations.
```
from IPython.display import clear_output
# Manual gradient-descent loop: 100 steps of vanilla SGD on (w, b),
# which are assumed to be defined (with requires_grad=True) in an
# earlier cell along with x and y.
for i in range(100):
    # forward pass and MSE loss
    y_pred = w * x + b
    loss = torch.mean( (y_pred - y)**2 )
    # backward pass accumulates dL/dw and dL/db into .grad
    loss.backward()
    # gradient-descent step with learning rate 0.05
    # (.data is used so the parameter update itself is not tracked)
    w.data -= 0.05 * w.grad.data
    b.data -= 0.05 * b.grad.data
    # zero gradients -- .backward() accumulates, so stale gradients must
    # be cleared before the next iteration
    w.grad.data.zero_()
    b.grad.data.zero_()
    # the rest of code is just bells and whistles: re-plot every 5th step
    if (i+1)%5==0:
        clear_output(True)
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.scatter(x.data.numpy(), y_pred.data.numpy(), color='orange', linewidth=5)
        plt.show()
        print("loss = ", loss.data.numpy()[0])
        # early stopping once the fit is good enough
        if loss.data.numpy()[0] < 0.5:
            print("Done!")
            break
```
__Bonus quest__: try implementing and writing some nonlinear regression. You can try quadratic features or some trigonometry, or a simple neural network. The only difference is that now you have more variables and a more complicated `y_pred`.
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
### Tensor vs Variable

` `
Tensor and Variable Weasley were identical and mischievous twin abstractions living in pytorch. Brilliant pranksters, they make sure your code never runs successfully from the first attempt.
Seriously though, Variable wraps around the torch tensor and allows you to compute gradients. Theoretically, Variable acts just like tensor for all intents and purposes. Practically, you will find yourself debugging tensor vs variable issues for most of your first days.
Ideally, you could wrap everything into Variable from the get-go and forget about Tensor. Hopefully you will once be able to do so __[upvote [this](https://github.com/pytorch/pytorch/issues/2228)]__.
Right now you can't, but it's getting better with each update.
The good news is that you can always swap between the two seamlessly:
* tensor to variable: `Variable(x)`
* variable to tensor: `x.data`
```
from torch.autograd import Variable
x = torch.arange(0,16).view(4,4).type(torch.IntTensor)
x_var = Variable(x)
print ("Result (tensors):\n", (x == 1) | (x % 3 == 0))
print ("Result (variables):\n", (x_var == 1) | (x_var % 3==0))
sequence = torch.randn(1,8,10)
filters = torch.randn(2,8,3)
#will work:
print("conv1d (variables):\n",torch.nn.functional.conv1d(Variable(sequence),Variable(filters)))
#will not work:
try:
print("conv1d (tensors):")
print(torch.nn.functional.conv1d(sequence,filters))
except Exception as e:
print (e)
```
# High-level pytorch
So far we've been dealing with the low-level torch API. While it's absolutely vital for any custom losses or layers, building large neural nets in it is a bit clumsy.
Luckily, there's also a high-level torch interface with pre-defined layers, activations and training algorithms.
We'll cover them as we go through a simple image recognition problem: classifying letters into __"A"__ vs __"B"__.
```
from notmnist import load_notmnist
X_train, y_train, X_test, y_test = load_notmnist(letters='AB')
X_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784])
print("Train size = %i, test_size = %i"%(len(X_train),len(X_test)))
for i in [0,1]:
plt.subplot(1,2,i+1)
plt.imshow(X_train[i].reshape([28,28]))
plt.title(str(y_train[i]))
```
Let's start with layers. The main abstraction here is __`torch.nn.Module`__
```
from torch import nn
import torch.nn.functional as F
print(nn.Module.__doc__)
```
There's a vast library of popular layers and architectures already built for ya'.
This is a binary classification problem, so we'll train a __Logistic Regression with sigmoid__.
$$P(y_i | X_i) = \sigma(W \cdot X_i + b) ={ 1 \over {1+e^{- [W \cdot X_i + b]}} }$$
```
# create a network that stacks layers on top of each other
model = nn.Sequential()
# add first "dense" layer with 784 input units and 1 output unit.
model.add_module('l1', nn.Linear(784, 1))
# add softmax activation for probabilities. Normalize over axis 1
# note: layer names must be unique
model.add_module('l2', nn.Sigmoid())
print("Weight shapes:", [w.shape for w in model.parameters()])
# create dummy data with 3 samples and 784 features
x = Variable(torch.FloatTensor(X_train[:3]))
y = Variable(torch.FloatTensor(y_train[:3]))
# compute outputs given inputs, both are variables
y_predicted = model(x)[:, 0]
y_predicted # display what we've got
```
Let's now define a loss function for our model.
The natural choice is to use binary crossentropy (aka logloss, negative llh):
$$ L = {1 \over N} \underset{X_i,y_i} \sum - [ y_i \cdot log P(y_i | X_i) + (1-y_i) \cdot log (1-P(y_i | X_i)) ]$$
```
crossentropy = ### YOUR CODE
loss = ### YOUR CODE
assert tuple(crossentropy.size()) == (3,), "Crossentropy must be a vector with element per sample"
assert tuple(loss.size()) == (1,), "Loss must be scalar. Did you forget the mean/sum?"
assert loss.data.numpy()[0] > 0, "Crossentropy must non-negative, zero only for perfect prediction"
assert loss.data.numpy()[0] <= np.log(3), "Loss is too large even for untrained model. Please double-check it."
```
When we trained Linear Regression above, we had to manually .zero_() gradients on both our variables. Imagine that code for a 50-layer network.
Again, to keep it from getting dirty, there's `torch.optim` module with pre-implemented algorithms:
```
opt = torch.optim.RMSprop(model.parameters(), lr=0.01)
# here's how it's used:
loss.backward() # add new gradients
opt.step() # change weights
opt.zero_grad() # clear gradients
# dispose of old variables to avoid bugs later
del x, y, y_predicted, loss
```
### Putting it all together
```
# create network again just in case
model = nn.Sequential()
model.add_module('first', nn.Linear(784, 1))
model.add_module('second', nn.Sigmoid())
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
history = []
for i in range(100):
# sample 256 random images
ix = np.random.randint(0, len(X_train), 256)
x_batch = Variable(torch.FloatTensor(X_train[ix]))
y_batch = Variable(torch.FloatTensor(y_train[ix]))
# predict probabilities
y_predicted = ### YOUR CODE
# compute loss, just like before
loss = ### YOUR CODE
# compute gradients
<YOUR CODE>
# RMSprop step
<YOUR CODE>
# clear gradients
<YOUR CODE>
history.append(loss.data.numpy()[0])
if i % 10 == 0:
print("step #%i | mean loss = %.3f" % (i, np.mean(history[-10:])))
```
__Debugging tips:__
* make sure your model predicts probabilities correctly. Just print them and see what's inside.
* don't forget _minus_ sign in the loss function! It's a mistake 99% ppl do at some point.
* make sure you zero-out gradients after each step. Srsly:)
* In general, pytorch's error messages are quite helpful, read 'em before you google 'em.
* if you see nan/inf, print what happens at each iteration to find out where exactly it occurs.
* If loss goes down and then turns nan midway through, try smaller learning rate. (Our current loss formula is unstable).
### Evaluation
Let's see how our model performs on test data
```
# use your model to predict classes (0 or 1) for all test samples
predicted_y_test = ### YOUR CODE
assert isinstance(predicted_y_test, np.ndarray), "please return np array, not %s" % type(predicted_y_test)
assert predicted_y_test.shape == y_test.shape, "please predict one class for each test sample"
assert np.in1d(predicted_y_test, y_test).all(), "please predict class indexes"
accuracy = np.mean(predicted_y_test == y_test)
print("Test accuracy: %.5f" % accuracy)
assert accuracy > 0.95, "try training longer"
```
## More about pytorch:
* Using torch on GPU and multi-GPU - [link](http://pytorch.org/docs/master/notes/cuda.html)
* More tutorials on pytorch - [link](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html)
* Pytorch examples - a repo that implements many cool DL models in pytorch - [link](https://github.com/pytorch/examples)
* Practical pytorch - a repo that implements some... other cool DL models... yes, in pytorch - [link](https://github.com/spro/practical-pytorch)
* And some more - [link](https://www.reddit.com/r/pytorch/comments/6z0yeo/pytorch_and_pytorch_tricks_for_kaggle/)
```
```
```
```
```
```
```
```
```
```
# Homework tasks
There will be three tasks worth 2, 3 and 5 points respectively.
If you get stuck with no progress, try switching to the next task and returning later.
### Task I (2 points) - tensormancy

When dealing with more complex stuff like neural network, it's best if you use tensors the way samurai uses his sword.
__1.1 the cannabola__
[_disclaimer_](https://gist.githubusercontent.com/justheuristic/e2c1fa28ca02670cabc42cacf3902796/raw/fd3d935cef63a01b85ed2790b5c11c370245cbd7/stddisclaimer.h)
Let's write another function, this time in polar coordinates:
$$\rho(\theta) = (1 + 0.9 \cdot cos (8 \cdot \theta) ) \cdot (1 + 0.1 \cdot cos(24 \cdot \theta)) \cdot (0.9 + 0.05 \cdot cos(200 \cdot \theta)) \cdot (1 + sin(\theta))$$
Then convert it into cartesian coordinates ([howto](http://www.mathsisfun.com/polar-cartesian-coordinates.html)) and plot the results.
Use torch tensors only: no lists, loops, numpy arrays, etc.
```
theta = torch.linspace(- np.pi, np.pi, steps=1000)
# compute rho(theta) as per formula above
rho = ### YOUR CODE
# Now convert polar (rho, theta) pairs into cartesian (x,y) to plot them.
x = ### YOUR CODE
y = ### YOUR CODE
plt.figure(figsize=[6,6])
plt.fill(x.numpy(), y.numpy(), color='green')
plt.grid()
```
### Task II: the game of life (3 points)
Now it's time for you to make something more challenging. We'll implement Conway's [Game of Life](http://web.stanford.edu/~cdebs/GameOfLife/) in _pure pytorch_.
While this is still a toy task, implementing game of life this way has one cool benefit: __you'll be able to run it on GPU! __ Indeed, what could be a better use of your gpu than simulating game of life on 1M/1M grids?

If you've skipped the url above out of sloth, here's the game of life:
* You have a 2D grid of cells, where each cell is "alive"(1) or "dead"(0)
* Any living cell that has 2 or 3 neighbors survives, else it dies [0,1 or 4+ neighbors]
* Any cell with exactly 3 neighbors becomes alive (if it was dead)
For this task, you are given a reference numpy implementation that you must convert to pytorch.
_[numpy code inspired by: https://github.com/rougier/numpy-100]_
__Note:__ You can find convolution in `torch.nn.functional.conv2d(Z,filters)`. Note that it has a different input format.
```
from scipy.signal import convolve2d
def np_update(Z):
    """Advance a Game of Life board one generation, modifying Z in place.

    Z is a 2D array of cells, 1 = alive and 0 = dead; the updated Z is
    also returned for convenience.
    """
    # 3x3 kernel of ones with a hollow centre: convolving with it counts
    # each cell's 8 neighbours.
    kernel = np.ones((3, 3), dtype=int)
    kernel[1, 1] = 0
    neighbours = convolve2d(Z, kernel, mode='same')
    # Conway's rules: a dead cell with exactly 3 neighbours is born;
    # a live cell with 2 or 3 neighbours survives; everything else dies.
    born = (Z == 0) & (neighbours == 3)
    stays = (Z == 1) & ((neighbours == 2) | (neighbours == 3))
    Z[:] = born | stays
    return Z
def torch_update(Z):
    """
    Implement an update function that does to Z exactly the same as np_update.

    :param Z: torch.FloatTensor of shape [height, width] containing 0s (dead) and 1s (alive)
    :returns: torch.FloatTensor Z after one update step.
    You can opt to create a new tensor or change Z in place.

    Hint: torch.nn.functional.conv2d expects 4D input [batch, channels,
    height, width], so reshape Z (and the neighbour-counting kernel)
    before convolving, then squeeze back to [height, width].
    """
    # <Your code here!> -- exercise placeholder: currently returns Z unchanged
    return Z
#initial frame
Z_numpy = np.random.choice([0,1],p=(0.5,0.5),size=(100,100))
Z = torch.from_numpy(Z_numpy).type(torch.FloatTensor)
#your debug polygon :)
Z_new = torch_update(Z.clone())
#tests
Z_reference = np_update(Z_numpy.copy())
assert np.all(Z_new.numpy() == Z_reference), "your pytorch implementation doesn't match np_update. Look into Z and np_update(ZZ) to investigate."
print("Well done!")
%matplotlib notebook
plt.ion()
#initialize game field
Z = np.random.choice([0,1],size=(100,100))
Z = torch.from_numpy(Z).type(torch.FloatTensor)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.show()
for _ in range(100):
#update
Z = torch_update(Z)
#re-draw image
ax.clear()
ax.imshow(Z.numpy(),cmap='gray')
fig.canvas.draw()
#Some fun setups for your amusement
#parallel stripes
Z = np.arange(100)%2 + np.zeros([100,100])
#with a small imperfection
Z[48:52,50]=1
Z = torch.from_numpy(Z).type(torch.FloatTensor)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.show()
for _ in range(100):
Z = torch_update(Z)
ax.clear()
ax.imshow(Z.numpy(),cmap='gray')
fig.canvas.draw()
```
More fun with Game of Life: [video](https://www.youtube.com/watch?v=C2vgICfQawE)
```
```
```
```
```
```
```
```
```
```
### Task III: Going deeper (5 points)
<img src="http://download.gamezone.com/uploads/image/data/1190338/article_post_width_a88.jpg" width=360>
Your ultimate task for this week is to build your first neural network [almost] from scratch and pure torch.
This time you will solve the same digit recognition problem, but at a greater scale
* 10 different letters
* 20k samples
We want you to build a network that reaches at least 80% accuracy and has at least 2 linear layers in it. Naturally, it should be nonlinear to beat logistic regression. You can implement it with either
With 10 classes you will need to use __Softmax__ at the top instead of sigmoid and train for __categorical crossentropy__ (see [here](https://www.kaggle.com/wiki/LogLoss)). Write your own loss or use `torch.nn.functional.nll_loss`. Just make sure you understand what it accepts as an input.
Note that you are not required to build 152-layer monsters here. A 2-layer (one hidden, one output) neural network should already give you an edge over logistic regression.
__[bonus kudos]__
If you've already beaten logistic regression with a two-layer net, but enthusiasm still ain't gone, you can try improving the test accuracy even further! It should be possible to reach 90% without convnets.
__SPOILERS!__
At the end of the notebook you will find a few tips and frequent errors.
If you feel confident enough, just start coding right away and get there ~~if~~ once you need to untangle yourself.
```
from notmnist import load_notmnist
X_train, y_train, X_test, y_test = load_notmnist(letters='ABCDEFGHIJ')
X_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784])
%matplotlib inline
plt.figure(figsize=[12,4])
for i in range(20):
plt.subplot(2,10,i+1)
plt.imshow(X_train[i].reshape([28,28]))
plt.title(str(y_train[i]))
#< a whole lot of your code >
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
# SPOILERS!
Recommended pipeline
* Adapt logistic regression from previous assignment to classify one letter against others (e.g. A vs the rest)
* Generalize it to multiclass logistic regression.
- Either try to remember lecture 0 or google it.
- Instead of weight vector you'll have to use matrix (feature_id x class_id)
 - softmax (exp over sum of exps) can be implemented manually or as nn.Softmax (layer) or F.softmax (function)
- probably better to use STOCHASTIC gradient descent (minibatch) for greater speed
- you can also try momentum/rmsprop/adawhatever
- in which case sample should probably be shuffled (or use random subsamples on each iteration)
* Add a hidden layer. Now your logistic regression uses hidden neurons instead of inputs.
- Hidden layer uses the same math as output layer (ex-logistic regression), but uses some nonlinearity (e.g. sigmoid) instead of softmax
- You need to train both layers, not just output layer :)
 - __Do not initialize weights with zeros__ (due to symmetry effects). Gaussian noise with small variance will do.
- 50 hidden neurons and a sigmoid nonlinearity will do for a start. Many ways to improve.
 - In the ideal case this totals to 2 .dot's, 1 softmax and 1 sigmoid
- __make sure this neural network works better than logistic regression__
* Now's the time to try improving the network. Consider layers (size, neuron count), nonlinearities, optimization methods, initialization - whatever you want, but please avoid convolutions for now.
* If anything seems wrong, try going through one step of training and printing everything you compute.
* If you see NaNs midway through optimization, you can estimate log P(y|x) as via F.log_softmax(layer_before_softmax)
| github_jupyter |
```
import pandas as pd
import pyspark.sql.functions as F
from datetime import datetime
from pyspark.sql.types import *
from pyspark import StorageLevel
import numpy as np
pd.set_option("display.max_rows", 1000)
pd.set_option("display.max_columns", 1000)
pd.set_option("mode.chained_assignment", None)
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer
# from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import OneHotEncoderEstimator, StringIndexer, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.sql import Row
from pyspark.ml.linalg import Vectors
# !pip install scikit-plot
import sklearn
import scikitplot as skplt
from sklearn.metrics import classification_report, confusion_matrix, precision_score
```
<hr />
<hr />
<hr />
```
addr_parquet = 'gs://ai-covid19-datalake/trusted/XGB-StratifiedSampleDatasets-3/dataset_3_positivepcrforcovid_000.parquet'
df = spark.read.parquet(addr_parquet)
id_cols = ['NU_NOTIFIC', 'CLASSI_FIN']
# taking out results of laboratorial exams (keeping only diagnostic images), dates, ids or integer attributes
cols = [
'NU_NOTIFIC', 'CLASSI_FIN', 'CRITERIO', 'EVOLUCAO',
'AGE_GROUP', 'DIST_PRI_NOTIFIC_Q',
'DIST_PRI_INTERNA_Q', 'DIST_PRI_ENTUTI_Q', 'DIST_PRI_SAIDUTI_Q', 'DIST_PRI_EVOLUCA_Q', 'DIST_PRI_ENCERRA_Q',
'SYMP_GROUP1', 'SYMP_GROUP2', 'SYMP_GROUP3', 'SYMP_GROUP4',
'RF_GROUP1', 'RF_GROUP2', 'RF_GROUP3', 'RF_GROUP4',
'SUPORT_VEN', 'UTI', 'HOSPITAL',
'DIST_PRI_RAIOX_Q', 'DIST_PRI_TOMO_Q', 'DIST_PRI_IF_Q', 'DIST_PRI_TRA_Q', 'DIST_PRI_PCR_Q', 'DIST_PRI_SOR_Q',
'RAIOX_RES', 'TOMO_RES',
'GMR_TRANSIT_STATIONS_1WEEK_BEFORE_Q', 'GMR_RETAIL_AND_RECREATION_1WEEK_BEFORE_Q', 'GMR_RESIDENTIAL_PERCENT_1WEEK_BEFORE_Q', 'GMR_WORKPLACES_PERCENT_1WEEK_BEFORE_Q',
'GMR_TRANSIT_STATIONS_2WEEKS_Q', 'GMR_RETAIL_AND_RECREATION_2WEEKS_Q', 'GMR_RESIDENTIAL_PERCENT_2WEEKS_Q', 'GMR_WORKPLACES_PERCENT_2WEEKS_Q',
'INMET_RELATIVE_AIR_HUMIDITY_1WEEK_BEFORE_Q', 'INMET_RELATIVE_AIR_HUMIDITY_2WEEKS_BEFORE_Q'
]
df.select(cols).limit(5).toPandas()
#
num_cols = [x for x in cols if x not in id_cols] + ['CLASSI_FIN']
df = df.select(num_cols)
df = df.na.fill('9999')
df = df.na.fill(9999)
for col in num_cols:
df = df.withColumn(col, F.col(col).cast('float'))
df = df.dropna()
# making some inspection of nulls
for col in df.select(num_cols).columns:
print('{}: {}'.format(col, df.filter(F.col(col).isNull()).count()))
df = df.withColumn('CLASSI_FIN', F.when(F.col('CLASSI_FIN') == 1.0, 0.0).otherwise(1.0))
df.select('CLASSI_FIN').groupBy('CLASSI_FIN').count().orderBy('CLASSI_FIN').show()
# before codification of classi_fin
# +----------+-----+
# |CLASSI_FIN|count|
# +----------+-----+
# | 1.0|28422|
# | 5.0|43478|
# +----------+-----+
df.select(num_cols).limit(1).toPandas()
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="CLASSI_FIN", outputCol="indexedLabel").fit(df)
input_cols = [x for x in cols if x not in id_cols]
assembler = VectorAssembler(inputCols = input_cols, outputCol= 'features')
df = assembler.transform(df)
# Automatically identify categorical features, and index them.
# Set maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=20).fit(df)
from pyspark import StorageLevel
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = df.randomSplit([0.7, 0.3])
trainingData = trainingData.persist(StorageLevel.MEMORY_ONLY)
testData = testData.persist(StorageLevel.MEMORY_ONLY)
# Train a RandomForest model.
rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=500, maxDepth=10)
# Convert indexed labels back to original labels.
labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
labels=labelIndexer.labels)
# Chain indexers and forest in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf, labelConverter])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
predictions.limit(1).toPandas()
# Select example rows to display.
predictions.select("predictedLabel", "indexedLabel", "indexedFeatures").show(50)
# Select (prediction, true label) and compute test error
print('Area under ROC and Precision-Recall curves')
print('')
evaluator = BinaryClassificationEvaluator(
labelCol="indexedLabel", rawPredictionCol="prediction", metricName="areaUnderROC")
accuracy = evaluator.evaluate(predictions)
print("Area Under ROC: {}".format(accuracy))
# Select (prediction, true label) and compute test error
evaluator = BinaryClassificationEvaluator(
labelCol="indexedLabel", rawPredictionCol="prediction", metricName="areaUnderPR")
accuracy = evaluator.evaluate(predictions)
print("Area Under PR: {}".format(accuracy))
print('')
print('Classification Report')
print('')
# ------ #
pred = predictions.select(['CLASSI_FIN', 'predictedLabel'])\
.withColumn('predictedLabel', F.col('predictedLabel').cast('double'))\
.withColumn('predictedLabel', F.when(F.col('predictedLabel') == 1.0, 'covid').otherwise('n-covid'))\
.withColumn('CLASSI_FIN', F.when(F.col('CLASSI_FIN') == 1.0, 'covid').otherwise('n-covid'))\
.toPandas()
y_true = pred['CLASSI_FIN'].tolist()
y_pred = pred['predictedLabel'].tolist()
print(classification_report(y_true, y_pred))
print('')
print('Confusion Matrix')
print('')
confusion_matrix(y_true, y_pred)
skplt.metrics.plot_confusion_matrix(y_true, y_pred, normalize='all')
rfModel = model.stages[2]
print(rfModel) # summary only
def ExtractFeatureImp(featureImp, dataset, featuresCol):
    """Pair each assembled feature with its importance score.

    Reads the per-feature attributes that VectorAssembler stored in the
    metadata of *featuresCol*, looks up each feature's importance in
    *featureImp* (indexable by the feature's ``idx``), and returns a
    pandas DataFrame sorted by descending score.
    """
    attrs = dataset.schema[featuresCol].metadata["ml_attr"]["attrs"]
    # Flatten the per-type attribute groups (numeric / binary / ...) into
    # a single list of {'idx': ..., 'name': ...} records.
    records = [attr for group in attrs.values() for attr in group]
    table = pd.DataFrame(records)
    table['score'] = table['idx'].apply(lambda i: featureImp[i])
    return table.sort_values('score', ascending=False)
ExtractFeatureImp(model.stages[-2].featureImportances, df, "features").head(100)
```
<hr />
<hr />
<hr />
### grid search
```
# pipeline = Pipeline(stages=[rf])
# paramGrid = ParamGridBuilder()\
# .addGrid(rf.seed, [2021]) \
# .addGrid(rf.numTrees, [21, 31]).build()
# paramGrid = ParamGridBuilder()\
# .addGrid(rf.seed, [2021]) \
# .addGrid(rf.numTrees, [21, 31]) \
# .addGrid(rf.maxDepth, [2, 5]) \
# .addGrid(rf.maxBins, [10, 30]) \
# .addGrid(rf.minInstancesPerNode, [1, 15]) \
# .addGrid(rf.minInfoGain, [0, 3]) \
# .addGrid(rf.featureSubsetStrategy, ['all', 'auto']) \
# .addGrid(rf.impurity, ['gini', 'entropy']).build()
# paramGrid = ParamGridBuilder()\
# .addGrid(rf.seed, [2021]) \
# .addGrid(rf.numTrees, range(10, 150, 30)) \
# .addGrid(rf.maxDepth, range(3, 15, 2)) \
# .addGrid(rf.maxBins, range(10, 90, 30)) \
# .addGrid(rf.minInstancesPerNode, range(1, 100, 20)) \
# .addGrid(rf.minInfoGain, range(0, 10, 2)) \
# .addGrid(rf.featureSubsetStrategy, ['all', 'auto', 'onethird', 'sqrt', 'log2']) \
# .addGrid(rf.impurity, ['gini', 'entropy']).build()
# crossval = CrossValidator(estimator=pipeline,
# estimatorParamMaps=paramGrid,
# evaluator=BinaryClassificationEvaluator(),
# numFolds=10)
# cvModel = crossval.fit(trainingData)
# cvModel.bestModel.extractParamMap()
print('finished')
```
<hr />
<hr />
<hr />
## Running on SUPER SRAG 'reclass' dataset
```
# Load the SRAG 'reclass' dataset (records awaiting reclassification) and cache it in memory.
reclas_ss = spark.read.parquet('gs://ai-covid19-datalake/standard/underdiagnose_srag/base_reclass.parquet').persist(StorageLevel.MEMORY_ONLY)
# temp: this reclass dataset has no 8.0 category, but the fitted data has one, so drop it here.
reclas_ss = reclas_ss.filter(F.col('RF_GROUP1') != 8.0)
reclas_ss.limit(2).toPandas()
reclas_ss.select('CLASSI_FIN').groupBy('CLASSI_FIN').count().show()
# Keep only the feature columns, the target, and the record identifier.
num_cols = [x for x in cols if x not in id_cols] + ['CLASSI_FIN']
reclas_ss = reclas_ss.select(num_cols + ['NU_NOTIFIC'])
# Fill missing values with a sentinel: '9999' for string columns, 9999 for numeric ones.
reclas_ss = reclas_ss.na.fill('9999')
reclas_ss = reclas_ss.na.fill(9999)
# Cast every feature column to float so the VectorAssembler accepts it.
for col in num_cols:
reclas_ss = reclas_ss.withColumn(col, F.col(col).cast('float'))
reclas_ss = reclas_ss.dropna()
# making some inspection of nulls
# NOTE(review): this iterates the columns of the outer `df`, not `reclas_ss` —
# only the column NAMES are used, so this works only while both share num_cols; confirm.
for col in df.select(num_cols).columns:
print('{}: {}'.format(col, reclas_ss.filter(F.col(col).isNull()).count()))
# Transform CLASSI_FIN's unclassified category into a valid value; it will not be used in the classification task.
reclas_ss = reclas_ss.withColumn('CLASSI_FIN', F.lit(0.0))
reclas_ss.select('CLASSI_FIN').groupBy('CLASSI_FIN').count().show()
reclas_ss.printSchema()
# Assemble the feature columns into a single 'features' vector column, mirroring training.
input_cols = [x for x in cols if x not in id_cols]
assembler = VectorAssembler(inputCols = input_cols, outputCol= 'features') #handleInvalid='skip'
reclas_ss = assembler.transform(reclas_ss)
# Automatically identify categorical features, and index them.
# NOTE(review): the original comment said features with > 4 distinct values are treated
# as continuous, but maxCategories is 20 — confirm which threshold is intended.
featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=20).fit(reclas_ss.select(input_cols + ['features', 'NU_NOTIFIC'])) #handleInvalid='skip'
reclas_ss.limit(2).toPandas()
# Score the reclass dataset with the model fitted earlier.
reclas_ss_pred = model.transform(reclas_ss)
reclas_ss_pred.limit(2).toPandas()
reclas_ss_pred.select('CLASSI_FIN').groupBy('CLASSI_FIN').count().show()
reclas_ss_pred.select('predictedLabel').groupBy('predictedLabel').count().show()
# Outcome (EVOLUCAO) distribution among records predicted positive.
reclas_ss_pred.filter((F.col('predictedLabel') == 1.0)).groupBy('EVOLUCAO').count().orderBy('EVOLUCAO').show()
```
<hr />
<hr />
<hr />
### result predicted dataset
```
# Parse the sample identifier out of the parquet file name
# (expected pattern: *_<number>_<name>_..._<id>.parquet).
struct_dataset = addr_parquet.split('_')
number_dataset = addr_parquet.split('_')[1]
name_dataset = addr_parquet.split('_')[2]
id_sample = addr_parquet.split('_')[-1].split('.')[0]
# Reload the raw reclass dataset so predictions can be joined back onto it.
reclas_ss = spark.read.parquet('gs://ai-covid19-datalake/standard/underdiagnose_srag/base_reclass.parquet').persist(StorageLevel.MEMORY_ONLY)
reclas_ss.limit(2).toPandas()
# Keep only the record id and the prediction, renamed to carry the sample id.
reclas_ss_pred = reclas_ss_pred.select(['NU_NOTIFIC', 'predictedLabel']).withColumnRenamed('predictedLabel', 'CLASSI_FIN_pred_'+id_sample)
reclas_ss_pred.limit(2).toPandas()
# to enable some validation after the join
# NOTE(review): distinct() here is over ALL columns, not just NU_NOTIFIC, so the
# printed message slightly overstates what is being checked — confirm intent.
c = reclas_ss.count()
dist_c = reclas_ss.distinct().count()
print('Reclass dataset has {} records, {} of them has a distinct NU_NOTIFIC.'.format(c, dist_c))
c = reclas_ss_pred.count()
dist_c = reclas_ss_pred.distinct().count()
print('Predicted Reclass dataset has {} records, {} of them has a distinct NU_NOTIFIC.'.format(c, dist_c))
# Left join keeps every raw record, attaching its prediction where available.
reclas_ss = reclas_ss.join(reclas_ss_pred, 'NU_NOTIFIC', 'left')
# performing some validation
c = reclas_ss.count()
dist_c = reclas_ss.distinct().count()
print('Reclass dataset has {} records, {} of them has a distinct NU_NOTIFIC.'.format(c, dist_c))
# writing result
reclas_ss.write.parquet('gs://ai-covid19-datalake/trusted/RF-StratifiedSampleDatasets-1-RESULT.parquet', mode='overwrite')
# Yearly counts of records predicted positive by sample 000.
reclas_ss.select('CLASSI_FIN_pred_000','ANO')\
.filter((F.col('CLASSI_FIN_pred_000') == 1.0))\
.groupBy('ANO').count()\
.orderBy('ANO').show(100)
# Same counts, broken down by age group and outcome.
reclas_ss.select('CLASSI_FIN_pred_000', 'AGE_GROUP', 'EVOLUCAO', 'ANO')\
.filter((F.col('CLASSI_FIN_pred_000') == 1.0))\
.groupBy('ANO', 'AGE_GROUP', 'EVOLUCAO').count()\
.orderBy(['ANO', 'AGE_GROUP', 'EVOLUCAO']).show(200)
```
| github_jupyter |
# Preface
The locations requiring configuration for your experiment are commented in capital text.
# Setup
## Installations
```
!pip install sphinxcontrib-napoleon
!pip install sphinxcontrib-bibtex
!pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ submodlib
!git clone https://github.com/decile-team/distil.git
!git clone https://github.com/circulosmeos/gdown.pl.git
import sys
sys.path.append("/content/distil/")
```
**Experiment-Specific Imports**
```
from distil.utils.models.resnet import ResNet18 # IMPORT YOUR MODEL HERE
```
## Main Imports
```
import pandas as pd
import numpy as np
import copy
from torch.utils.data import Dataset, DataLoader, Subset, ConcatDataset
import torch.nn.functional as F
from torch import nn
from torchvision import transforms
from torchvision import datasets
from PIL import Image
import torch
import torch.optim as optim
from torch.autograd import Variable
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import time
import math
import random
import os
import pickle
from numpy.linalg import cond
from numpy.linalg import inv
from numpy.linalg import norm
from scipy import sparse as sp
from scipy.linalg import lstsq
from scipy.linalg import solve
from scipy.optimize import nnls
from distil.active_learning_strategies.badge import BADGE
from distil.active_learning_strategies.glister import GLISTER
from distil.active_learning_strategies.margin_sampling import MarginSampling
from distil.active_learning_strategies.entropy_sampling import EntropySampling
from distil.active_learning_strategies.random_sampling import RandomSampling
from distil.active_learning_strategies.gradmatch_active import GradMatchActive
from distil.active_learning_strategies.fass import FASS
from distil.active_learning_strategies.adversarial_bim import AdversarialBIM
from distil.active_learning_strategies.adversarial_deepfool import AdversarialDeepFool
from distil.active_learning_strategies.core_set import CoreSet
from distil.active_learning_strategies.least_confidence_sampling import LeastConfidenceSampling
from distil.active_learning_strategies.margin_sampling import MarginSampling
from distil.active_learning_strategies.bayesian_active_learning_disagreement_dropout import BALDDropout
from distil.utils.train_helper import data_train
from distil.utils.utils import LabeledToUnlabeledDataset
from google.colab import drive
import warnings
warnings.filterwarnings("ignore")
```
## Checkpointing and Logs
```
class Checkpoint:
"""Crash-tolerant snapshot of an active-learning run.

Stores the accuracy history (``acc_list``), the original-unlabeled-set
indices labeled so far (``indices``), the model weights (``state_dict``)
and the owning ``experiment_name``. Each save writes TWO copies of the
pickle (suffixes "1" and "2"); on load, a checkpoint is trusted only if
both copies compare equal, which guards against a crash mid-write.
"""
def __init__(self, acc_list=None, indices=None, state_dict=None, experiment_name=None, path=None):
"""Either load the newest valid checkpoint from ``path`` (when given)
or build a new checkpoint from the explicitly supplied fields."""
# If a path is supplied, load a checkpoint from there.
if path is not None:
if experiment_name is not None:
self.load_checkpoint(path, experiment_name)
else:
raise ValueError("Checkpoint contains None value for experiment_name")
return
if acc_list is None:
raise ValueError("Checkpoint contains None value for acc_list")
if indices is None:
raise ValueError("Checkpoint contains None value for indices")
if state_dict is None:
raise ValueError("Checkpoint contains None value for state_dict")
if experiment_name is None:
raise ValueError("Checkpoint contains None value for experiment_name")
self.acc_list = acc_list
self.indices = indices
self.state_dict = state_dict
self.experiment_name = experiment_name
def __eq__(self, other):
# Equality deliberately compares only the metadata fields below;
# state_dict is not part of the comparison.
# Check if the accuracy lists are equal
acc_lists_equal = self.acc_list == other.acc_list
# Check if the indices are equal
indices_equal = self.indices == other.indices
# Check if the experiment names are equal
experiment_names_equal = self.experiment_name == other.experiment_name
return acc_lists_equal and indices_equal and experiment_names_equal
def save_checkpoint(self, path):
"""Persist this checkpoint under ``path`` as two identical pickles
("c<timestamp>1" and "c<timestamp>2"); load_checkpoint only accepts
a checkpoint whose two copies match."""
# Get current time to use in file timestamp
timestamp = time.time_ns()
# Create the path supplied
os.makedirs(path, exist_ok=True)
# Name saved files using timestamp to add recency information
save_path = os.path.join(path, F"c{timestamp}1")
copy_save_path = os.path.join(path, F"c{timestamp}2")
# Write this checkpoint to the first save location
with open(save_path, 'wb') as save_file:
pickle.dump(self, save_file)
# Write this checkpoint to the second save location
with open(copy_save_path, 'wb') as copy_save_file:
pickle.dump(self, copy_save_file)
def load_checkpoint(self, path, experiment_name):
"""Populate this object from the most recent valid checkpoint in
``path`` belonging to ``experiment_name``; all fields are set to
None when no valid checkpoint exists."""
# Obtain a list of all files present at the path
timestamp_save_no = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
# If there are no such files, set values to None and return
if len(timestamp_save_no) == 0:
self.acc_list = None
self.indices = None
self.state_dict = None
return
# Sort the list of strings to get the most recent
timestamp_save_no.sort(reverse=True)
# Read in two files at a time, checking if they are equal to one another.
# If they are equal, then it means that the save operation finished correctly.
# If they are not, then it means that the save operation failed (could not be
# done atomically). Repeat this action until no possible pair can exist.
while len(timestamp_save_no) > 1:
# Pop a most recent checkpoint copy
first_file = timestamp_save_no.pop(0)
# Keep popping until two copies with equal timestamps are present
# NOTE(review): make sure the empty-list check below guards this pop —
# a trailing file with no matching partner would otherwise raise IndexError.
while True:
second_file = timestamp_save_no.pop(0)
# Timestamps match if the removal of the "1" or "2" results in equal numbers
if (second_file[:-1]) == (first_file[:-1]):
break
else:
first_file = second_file
# If there are no more checkpoints to examine, set to None and return
if len(timestamp_save_no) == 0:
self.acc_list = None
self.indices = None
self.state_dict = None
return
# Form the paths to the files
load_path = os.path.join(path, first_file)
copy_load_path = os.path.join(path, second_file)
# Load the two checkpoints (trusted local files; pickle is unsafe on untrusted input)
with open(load_path, 'rb') as load_file:
checkpoint = pickle.load(load_file)
with open(copy_load_path, 'rb') as copy_load_file:
checkpoint_copy = pickle.load(copy_load_file)
# Do not check this experiment if it is not the one we need to restore
if checkpoint.experiment_name != experiment_name:
continue
# Check if they are equal
if checkpoint == checkpoint_copy:
# This checkpoint will suffice. Populate this checkpoint's fields
# with the selected checkpoint's fields.
self.acc_list = checkpoint.acc_list
self.indices = checkpoint.indices
self.state_dict = checkpoint.state_dict
return
# Instantiate None values in acc_list, indices, and model
self.acc_list = None
self.indices = None
self.state_dict = None
def get_saved_values(self):
"""Return the tuple (acc_list, indices, state_dict)."""
return (self.acc_list, self.indices, self.state_dict)
def delete_checkpoints(checkpoint_directory, experiment_name):
    """Remove every checkpoint file in *checkpoint_directory* whose stored
    experiment name equals *experiment_name*."""
    candidate_files = [entry for entry in os.listdir(checkpoint_directory)
                       if os.path.isfile(os.path.join(checkpoint_directory, entry))]
    for candidate in candidate_files:
        candidate_path = os.path.join(checkpoint_directory, candidate)
        # The file may have disappeared between listing and inspection.
        if not os.path.exists(candidate_path):
            continue
        # Unpickle just enough to read the experiment name recorded inside.
        with open(candidate_path, "rb") as handle:
            stored_checkpoint = pickle.load(handle)
        # Delete only files belonging to the requested experiment.
        if stored_checkpoint.experiment_name == experiment_name:
            os.remove(candidate_path)
#Logs
def write_logs(logs, save_directory, rd):
    """Append one round's log entries to the shared run log file."""
    # All rounds append to the same fixed-name file under save_directory.
    log_path = save_directory + 'run_' + '.txt'
    separator = '---------------------\n'
    with open(log_path, 'a') as log_file:
        log_file.write(separator)
        log_file.write('Round ' + str(rd) + '\n')
        log_file.write(separator)
        for entry_name, entry_value in logs.items():
            if entry_name == 'Training':
                # Per-epoch training logs: a header line, then one line per epoch.
                log_file.write(str(entry_name) + '\n')
                for epoch_record in entry_value:
                    log_file.write(str(epoch_record) + '\n')
            else:
                log_file.write(str(entry_name) + ' - ' + str(entry_value) + '\n')
```
## AL Loop
```
def train_one(full_train_dataset, initial_train_indices, test_dataset, net, n_rounds, budget, args, nclasses, strategy, save_directory, checkpoint_directory, experiment_name):
    """Run one complete active-learning experiment.

    Starting from the labeled seed set given by ``initial_train_indices``,
    repeatedly (1) select ``budget`` unlabeled points with the chosen AL
    strategy, (2) move them into the training set, (3) retrain the model,
    and (4) record test accuracy. Progress is checkpointed after every
    round so a crashed run can resume where it left off.

    Parameters
    ----------
    full_train_dataset : dataset holding all (labeled + unlabeled) points.
    initial_train_indices : indices of the initial labeled seed set.
    test_dataset : held-out evaluation dataset.
    net : model to train (weights updated in place).
    n_rounds : number of selection/training rounds.
    budget : number of points to label per round.
    args : training hyperparameters (also consumed by the strategies).
    nclasses : number of target classes.
    strategy : name of the AL strategy (e.g. "random", "badge", "glister").
    save_directory : directory for per-round text logs.
    checkpoint_directory : directory for crash-recovery checkpoints.
    experiment_name : unique name used to tag checkpoints for this run.

    Returns
    -------
    numpy.ndarray of length ``n_rounds + 1``: test accuracy before the
    first selection round and after each subsequent round.
    """
    # Split the full training dataset into an initial training dataset and an unlabeled dataset.
    train_dataset = Subset(full_train_dataset, initial_train_indices)
    initial_unlabeled_indices = list(set(range(len(full_train_dataset))) - set(initial_train_indices))
    unlabeled_dataset = Subset(full_train_dataset, initial_unlabeled_indices)
    # Set up the AL strategy. All strategies share the same constructor
    # signature; GLISTER additionally needs the learning rate plus extra kwargs,
    # so it is handled separately.
    if strategy == "glister":
        strategy_args = {'batch_size': args['batch_size'], 'lr': args['lr'], 'device': args['device']}
        strategy = GLISTER(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args, typeOf='rand', lam=0.1)
    else:
        strategy_constructors = {
            "random": RandomSampling,
            "entropy": EntropySampling,
            "margin": MarginSampling,
            "least_confidence": LeastConfidenceSampling,
            "badge": BADGE,
            "coreset": CoreSet,
            "fass": FASS,
            "adversarial_bim": AdversarialBIM,
            "adversarial_deepfool": AdversarialDeepFool,
            "bald": BALDDropout,
        }
        strategy_args = {'batch_size': args['batch_size'], 'device': args['device']}
        # An unknown strategy name now fails fast with a KeyError instead of
        # failing later with a confusing AttributeError on a plain string.
        strategy = strategy_constructors[strategy](train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    # acc[0] is the accuracy before round 1; acc[rd] after round rd.
    acc = np.zeros(n_rounds + 1)
    initial_unlabeled_size = len(unlabeled_dataset)
    initial_round = 1
    # index_map[i] is the position, in the ORIGINAL unlabeled set, of the
    # i-th point still unlabeled; entries are removed as points get labeled.
    index_map = np.array([x for x in range(initial_unlabeled_size)])
    # Attempt to load a checkpoint. If one exists, then the experiment crashed.
    training_checkpoint = Checkpoint(experiment_name=experiment_name, path=checkpoint_directory)
    rec_acc, rec_indices, rec_state_dict = training_checkpoint.get_saved_values()
    # Check if there are values to recover.
    if rec_acc is not None:
        # Restore the accuracy list.
        for i in range(len(rec_acc)):
            acc[i] = rec_acc[i]
        # Restore the indices list and shift those unlabeled points to the labeled set.
        index_map = np.delete(index_map, rec_indices)
        # Record initial size of the training dataset.
        # BUG FIX: this was previously assigned to the misspelled name
        # 'intial_seed_size', so the read below silently resolved to the
        # notebook-level global 'initial_seed_size' instead of this local,
        # corrupting the resume-round computation whenever they differed.
        initial_seed_size = len(train_dataset)
        restored_unlabeled_points = Subset(unlabeled_dataset, rec_indices)
        train_dataset = ConcatDataset([train_dataset, restored_unlabeled_points])
        remaining_unlabeled_indices = list(set(range(len(unlabeled_dataset))) - set(rec_indices))
        unlabeled_dataset = Subset(unlabeled_dataset, remaining_unlabeled_indices)
        # Restore the model.
        net.load_state_dict(rec_state_dict)
        # Resume from the round after the last completed one.
        initial_round = (len(train_dataset) - initial_seed_size) // budget + 1
        # Ensure the loaded model is moved to the GPU.
        if torch.cuda.is_available():
            net = net.cuda()
        strategy.update_model(net)
        strategy.update_data(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset))
        dt = data_train(train_dataset, net, args)
    else:
        # Fresh run: evaluate the seed model and log round 0.
        if torch.cuda.is_available():
            net = net.cuda()
        dt = data_train(train_dataset, net, args)
        acc[0] = dt.get_acc_on_set(test_dataset)
        print('Initial Testing accuracy:', round(acc[0]*100, 2), flush=True)
        logs = {}
        logs['Training Points'] = len(train_dataset)
        logs['Test Accuracy'] = str(round(acc[0]*100, 2))
        write_logs(logs, save_directory, 0)
    # Update the trained model in the strategy object (no-op if already done above).
    strategy.update_model(net)
    # Record the training transform and test transform for disabling purposes.
    train_transform = full_train_dataset.transform
    test_transform = test_dataset.transform
    # --- Active-learning loop ---
    for rd in range(initial_round, n_rounds + 1):
        print('-------------------------------------------------')
        print('Round', rd)
        print('-------------------------------------------------')
        sel_time = time.time()
        full_train_dataset.transform = test_transform  # Disable any augmentation while selecting points
        idx = strategy.select(budget)
        full_train_dataset.transform = train_transform  # Re-enable any augmentation done during training
        sel_time = time.time() - sel_time
        print("Selection Time:", sel_time)
        # Move the selected points from the unlabeled pool to the training set.
        selected_unlabeled_points = Subset(unlabeled_dataset, idx)
        train_dataset = ConcatDataset([train_dataset, selected_unlabeled_points])
        remaining_unlabeled_indices = list(set(range(len(unlabeled_dataset))) - set(idx))
        unlabeled_dataset = Subset(unlabeled_dataset, remaining_unlabeled_indices)
        # Update the index map (idx are positions in the CURRENT unlabeled set).
        index_map = np.delete(index_map, idx, axis=0)
        print('Number of training points -', len(train_dataset))
        # Start training.
        strategy.update_data(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset))
        dt.update_data(train_dataset)
        t1 = time.time()
        clf, train_logs = dt.train(None)
        t2 = time.time()
        acc[rd] = dt.get_acc_on_set(test_dataset)
        logs = {}
        logs['Training Points'] = len(train_dataset)
        logs['Test Accuracy'] = str(round(acc[rd]*100, 2))
        logs['Selection Time'] = str(sel_time)
        logs['Training Time'] = str(t2 - t1)  # log-key typo 'Trainining Time' fixed
        logs['Training'] = train_logs
        write_logs(logs, save_directory, rd)
        strategy.update_model(clf)
        print('Testing accuracy:', round(acc[rd]*100, 2), flush=True)
        # Create a checkpoint recording which ORIGINAL unlabeled indices are used.
        used_indices = np.array([x for x in range(initial_unlabeled_size)])
        used_indices = np.delete(used_indices, index_map).tolist()
        round_checkpoint = Checkpoint(acc.tolist(), used_indices, clf.state_dict(), experiment_name=experiment_name)
        round_checkpoint.save_checkpoint(checkpoint_directory)
    print('Training Completed')
    return acc
```
# CIFAR10
## Parameter Definitions
Parameters related to the specific experiment are placed here. You should examine each and modify them as needed.
```
data_set_name = "CIFAR10" # DSET NAME HERE
dataset_root_path = '../downloaded_data/'
net = ResNet18() # MODEL HERE
# MODIFY AS NECESSARY
logs_directory = '/content/gdrive/MyDrive/colab_storage/logs/'
checkpoint_directory = '/content/gdrive/MyDrive/colab_storage/check/'
model_directory = "/content/gdrive/MyDrive/colab_storage/model/"
experiment_name = "CIFAR10 RESET"
# Number of labeled points the experiment starts with.
initial_seed_size = 1000 # INIT SEED SIZE HERE
# Upper bound on the total number of labeled points at the end of the run.
training_size_cap = 25000 # TRAIN SIZE CAP HERE
# Points labeled per active-learning round.
budget = 3000 # BUDGET HERE
# CHANGE ARGS AS NECESSARY
args = {'n_epoch':300, 'lr':float(0.01), 'batch_size':20, 'max_accuracy':float(0.99), 'islogs':True, 'isreset':True, 'isverbose':True, 'device':'cuda'}
# Train on approximately the full dataset given the budget constraints
n_rounds = (training_size_cap - initial_seed_size) // budget
```
## Initial Loading and Training
You may choose to train a new initial model or to continue to load a specific model. If this notebook is being executed in Colab, you should consider whether or not you need the gdown line.
```
# Mount drive containing possible saved model and define file path.
# Mount drive containing possible saved model and define file path.
colab_model_storage_mount = "/content/gdrive"
drive.mount(colab_model_storage_mount)
# Create the storage directories if they do not already exist.
os.makedirs(logs_directory, exist_ok = True)
os.makedirs(checkpoint_directory, exist_ok = True)
os.makedirs(model_directory, exist_ok = True)
# From here on, model_directory names the per-dataset model FILE, not a directory.
model_directory = F"{model_directory}/{data_set_name}"
#!/content/gdown.pl/gdown.pl "INSERT SHARABLE LINK HERE" "INSERT DOWNLOAD LOCATION HERE (ideally, same as model_directory)" # MAY NOT NEED THIS LINE IF NOT CLONING MODEL FROM COLAB
# Load the dataset. Each branch defines the train/test transforms, the
# datasets themselves, and the number of classes.
# NOTE(review): there is no else branch — an unmatched data_set_name leaves
# full_train_dataset/nclasses undefined and the code below fails; confirm.
if data_set_name == "CIFAR10":
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
full_train_dataset = datasets.CIFAR10(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.CIFAR10(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor)
nclasses = 10 # NUM CLASSES HERE
elif data_set_name == "CIFAR100":
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))])
full_train_dataset = datasets.CIFAR100(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.CIFAR100(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor)
nclasses = 100 # NUM CLASSES HERE
elif data_set_name == "MNIST":
image_dim=28
train_transform = transforms.Compose([transforms.RandomCrop(image_dim, padding=4), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
test_transform = transforms.Compose([transforms.Resize((image_dim, image_dim)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
full_train_dataset = datasets.MNIST(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.MNIST(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor)
nclasses = 10 # NUM CLASSES HERE
elif data_set_name == "FashionMNIST":
train_transform = transforms.Compose([transforms.RandomCrop(28, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) # Use mean/std of MNIST
full_train_dataset = datasets.FashionMNIST(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.FashionMNIST(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor)
nclasses = 10 # NUM CLASSES HERE
elif data_set_name == "SVHN":
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std
full_train_dataset = datasets.SVHN(dataset_root_path, split='train', download=True, transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.SVHN(dataset_root_path, split='test', download=True, transform=test_transform, target_transform=torch.tensor)
nclasses = 10 # NUM CLASSES HERE
elif data_set_name == "ImageNet":
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std
# Note: Not automatically downloaded due to size restrictions. Notebook needs to be adapted to run on local device.
full_train_dataset = datasets.ImageNet(dataset_root_path, download=False, split='train', transform=train_transform, target_transform=torch.tensor)
test_dataset = datasets.ImageNet(dataset_root_path, download=False, split='val', transform=test_transform, target_transform=torch.tensor)
nclasses = 1000 # NUM CLASSES HERE
args['nclasses'] = nclasses
# Shape of one input sample.
dim = full_train_dataset[0][0].shape
# Seed the random number generator for reproducibility and create the initial seed set
np.random.seed(42)
initial_train_indices = np.random.choice(len(full_train_dataset), replace=False, size=initial_seed_size)
# COMMENT OUT ONE OR THE OTHER IF YOU WANT TO TRAIN A NEW INITIAL MODEL
load_model = False
#load_model = True
# Only train a new model if one does not exist.
if load_model:
net.load_state_dict(torch.load(model_directory))
initial_model = net
else:
dt = data_train(Subset(full_train_dataset, initial_train_indices), net, args)
initial_model, _ = dt.train(None)
torch.save(initial_model.state_dict(), model_directory)
print("Training for", n_rounds, "rounds with budget", budget, "on unlabeled set size", training_size_cap)
```
## Random Sampling
```
# Run the "random" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "random"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## Entropy
```
# Run the "entropy" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "entropy"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## GLISTER
```
# Run the "glister" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "glister"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## FASS
```
# Run the "fass" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "fass"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## BADGE
```
# Run the "badge" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "badge"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## CoreSet
```
# Run the "coreset" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "coreset"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## Least Confidence
```
# Run the "least_confidence" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "least_confidence"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
## Margin
```
# Run the "margin" acquisition strategy; logs go in a per-strategy subdirectory.
strategy = "margin"
strat_logs = logs_directory+F'{data_set_name}/{strategy}/'
os.makedirs(strat_logs, exist_ok = True)
# Start from a deep copy of the shared initial model so strategies are comparable.
train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
```
| github_jupyter |
<a href="http://landlab.github.io"><img style="float: left" src="../../landlab_header.png"></a>
WARNING: This tutorial has not been updated to work with Landlab 2.0 and is thus not tested to verify that it will run.
### Tutorial For Cellular Automaton Vegetation Model Coupled With Ecohydrologic Model
<hr>
<small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/v2_dev/user_guide/tutorials.html">https://landlab.readthedocs.io/en/v2_dev/user_guide/tutorials.html</a></small>
<hr>
This tutorial demonstrates implementation of the Cellular Automaton Tree-GRass-Shrub Simulator (CATGRaSS) [Zhou et al., 2013] on a digital elevation model (DEM). This model is built using components from the Landlab component library. CATGRaSS is a spatially explicit model of plant coexistence. It simulates local ecohydrologic dynamics (soil moisture, transpiration, biomass) and spatial evolution of tree, grass, and shrub Plant Functional Types (PFT) driven by rainfall and solar radiation.
Each cell in the model grid can hold a single PFT or remain empty. Tree and shrub plants disperse seeds to their neighbors. Grass seeds are assumed to be available at each cell. Establishment of plants in empty cells is determined probabilistically based on water stress for each PFT. Plants with lower water stress have higher probability of establishment. Plant mortality is simulated probabilistically as a result of aging and drought stress. Fires and grazing will be added to this model soon.
This model (driver) contains:
- A local vegetation dynamics model that simulates storm and inter-storm water balance and ecohydrologic fluxes (ET, runoff), and plant biomass dynamics by coupling the following components:
- PrecipitationDistribution
- Radiation
- PotentialEvapotranspiration
- SoilMoisture
- Vegetation
- A spatially explicit probabilistic cellular automaton component that simulates plant competition by tracking establishment and mortality of plants based on soil moisture stress:
- VegCA
To run this Jupyter notebook, please make sure that the following files are in the same folder:
- cellular_automaton_vegetation_DEM.ipynb (this notebook)
- Inputs_Vegetation_CA.txt (Input parameters for the model)
- Ecohyd_functions_DEM.py (Utility functions)
[Ref: Zhou, X, E. Istanbulluoglu, and E.R. Vivoni. "Modeling the ecohydrological role of aspect-controlled radiation on tree-grass-shrub coexistence in a semiarid climate." Water Resources Research 49.5 (2013): 2872-2895]
In this tutorial, we are going to work with a landscape in central New Mexico, USA, where aspect controls the organization of PFTs. The climate in this area is semi-arid with Mean Annual Precipitation (MAP) of 254 mm [Zhou et. al 2013].
We will do the following:
- Import a landscape
- Initialize the landscape with random distribution of PFTs
- Run the coupled Ecohydrology and cellular automata plant competition model for 50 years
- Visualize and examine outputs
#### Let's walk through the code:
Import the required libraries:
```
from __future__ import print_function
%matplotlib inline
import time
import numpy as np
from landlab.io import read_esri_ascii
from landlab import RasterModelGrid as rmg
from landlab import load_params
from Ecohyd_functions_DEM import (Initialize_, Empty_arrays, Create_PET_lookup,
Save_, Plot_)
```
Note: 'Ecohyd_functions_DEM.py' is a utility script that contains functions that instantiate components, manage inputs and outputs, and help keep this driver concise. The contents of 'Ecohyd_functions_DEM.py' could be part of this driver (the current file), but are left out to keep the driver concise.
We will use two grids in this driver. One grid will represent the actual landscape or domain (e.g., created from a DEM). Another grid, with one cell for each of the plant functional types (PFTs), will be used to create Potential Evapotranspiration (PET) lookup tables.
- grid: This grid represents the actual landscape. Each cell can be occupied by a single PFT such as tree, shrub, grass, or can be empty (bare). In this example we assume that the elevation field and the vegetation field have the same resolution.
- grid1: This grid will be used to compute plant-specific PET at a point. Spatially distributed PET Lookup arrays (for all days of the year) will be created for each PFT based on these point values.
Note: In this tutorial, the physical ecohydrological components and cellular automata plant competition will be run on grids with same resolution. To develop differential spatial resolutions for the two models, see the tutorial 'cellular_automaton_vegetation_flat.ipynb'.
```
(grid, elevation) = read_esri_ascii('DEM_10m.asc') # Read the DEM
grid1 = rmg((5, 4), xy_spacing=(5., 5.)) # Representative grid
```
Include the input file that contains all input parameters needed for all components. This file can either be a Python dictionary or a text file that can be converted into a Python dictionary. If a text file is provided, it will be converted to a Python dictionary. Here we use an existing text file prepared for this exercise.
```
InputFile = 'Inputs_Vegetation_CA_DEM.txt'
data = load_params(InputFile) # Creates dictionary that holds the inputs
```
Instantiate Landlab components to simulate corresponding attributes. In this example, we shall demonstrate the use of seasonal rainfall and PFT-specific potential evapotranspiration. The instantiated objects are:
- PD_D: object for dry season rainfall,
- PD_W: object for wet season rainfall,
- Rad: Radiation object computes radiation factor defined as the ratio of total shortwave radiation incident on a sloped surface to total shortwave radiation incident on a flat surface.
- Rad_PET: Representative radiation object which is used only as an input for PET.
- PET_PFT: Plant specific PET objects (we use a cosine function, fitted to calculated PET, as a function of Day Of the Year (DOY) to reduce computation overhead). This value is spatially distributed by using a radiation factor.
- SM: Soil Moisture object simulates root-zone average soil moisture at each cell using inputs of potential evapotranspiration, live leaf area index, and vegetation cover.
- VEG: Vegetation dynamics object simulates net primary productivity and biomass and thus leaf area index at each cell based on inputs of root-zone average soil moisture.
- vegca: Cellular Automaton plant competition object. This object simulates the spatial dynamics of PFTs. It is run once every year at the end of the growing season. This object is initialized with a random cellular field of PFT. Each year, this object updates the field of PFTs based on probabilistic establishment and mortality rules employed at each cell of the modeled DEM.
Note: Almost every component in Landlab is coded as a 'class' (to harness the advantages of object oriented programming). An 'object' is the instantiation of the 'class' (for more information, please refer any object oriented programming book). A 'field' refers to a Landlab field (please refer to the [Landlab documentation](https://github.com/landlab/landlab/wiki/Grid#adding-data-to-a-landlab-grid-element-using-fields) to learn more about Landlab fields).
Now let's instantiate all Landlab components that we are going to use for this tutorial:
```
PD_D, PD_W, Rad, Rad_PET, PET_Tree, PET_Shrub, PET_Grass, SM, VEG, vegca = (
Initialize_(data, grid, grid1, elevation))
```
Lets look at the initial organization of PFTs
```
import matplotlib.pyplot as plt
from landlab.plot import imshow_grid
import matplotlib as mpl
cmap = mpl.colors.ListedColormap(
['green', 'red', 'black', 'white', 'red', 'black'])
bounds = [-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
description = 'green: grass; red: shrub; black: tree; white: bare'
plt.figure(101)
imshow_grid(grid,
'vegetation__plant_functional_type',
values_at='cell',
cmap=cmap,
grid_units=('m', 'm'),
norm=norm,
limits=[0, 5],
allow_colorbar=False)
plt.figtext(0.2, 0.0, description, weight='bold', fontsize=10)
```
Specify an approximate number of years for the model to run.
IMPORTANT:
This code is numerically intensive. It might take an hour or more to run this simulation for 300 years. It is suggested to run the simulation for 50 years, which should take less than 7 minutes to execute.
```
n_years = 50 # Approx number of years for model to run
# Calculate approximate number of storms per year
fraction_wet = (data['doy__end_of_monsoon'] -
data['doy__start_of_monsoon']) / 365.
fraction_dry = 1 - fraction_wet
no_of_storms_wet = (8760 * (fraction_wet) /
(data['mean_interstorm_wet'] + data['mean_storm_wet']))
no_of_storms_dry = (8760 * (fraction_dry) /
(data['mean_interstorm_dry'] + data['mean_storm_dry']))
n = int(n_years * (no_of_storms_wet + no_of_storms_dry))
```
Create empty arrays to store spatio-temporal data over multiple iterations. The captured data can be used for plotting model outputs.
```
P, Tb, Tr, Time, VegType, PET_, Rad_Factor, EP30, PET_threshold = (
Empty_arrays(n, n_years, grid, grid1))
```
To reduce computational overhead, we shall create a lookup array for plant-specific PET values for each day of the year, and slope and aspect grid.
```
Create_PET_lookup(Rad, PET_Tree, PET_Shrub, PET_Grass, PET_, Rad_Factor, EP30,
Rad_PET, grid)
```
Specify current_time (in years). current_time is the current time in the simulation.
```
# # Represent current time in years
current_time = 0  # Start from first day of Jan
# Keep track of run time for simulation—optional
# NOTE: time.clock() was removed in Python 3.8; time.perf_counter() is the
# recommended replacement for measuring elapsed wall-clock intervals.
Start_time = time.perf_counter()  # Recording time taken for simulation
# declaring few variables that will be used in storm loop
time_check = 0.  # Buffer to store current_time at previous storm
yrs = 0  # Keep track of number of years passed
WS = 0.  # Buffer for cumulative Water Stress within the current year
Tg = 365  # Growing season in days
```
The loop below couples the components introduced above in a for loop until all "n" storms are generated. Time is advanced by the soil moisture object based on storm and interstorm durations that are estimated by the storm generator object. The ecohydrologic model is run on each storm, whereas the cellular automaton vegetation component is run once every year.
Note: This loop might take around 10 minutes (depending on your computer) to run for a 50 year simulation. Ignore any warnings you might see.
```
# # Run storm Loop: couples rainfall, PET, soil moisture, vegetation
# dynamics, and the yearly cellular-automaton plant competition.
for i in range(0, n):
    # # Update objects
    # Calculate Day of Year (DOY) from the fractional part of current_time
    Julian = int(np.floor((current_time - np.floor(current_time)) * 365.))
    # Generate seasonal storms
    # for Dry season
    if Julian < data['doy__start_of_monsoon'] or Julian > data[
            'doy__end_of_monsoon']:
        PD_D.update()
        P[i] = PD_D.get_storm_depth()
        Tr[i] = PD_D.get_precipitation_event_duration()
        Tb[i] = PD_D.get_interstorm_event_duration()
    # Wet Season—Jul to Sep—NA Monsoon
    else:
        PD_W.update()
        P[i] = PD_W.get_storm_depth()
        Tr[i] = PD_W.get_precipitation_event_duration()
        Tb[i] = PD_W.get_interstorm_event_duration()
    # Spatially distribute PET and its 30-day-mean (analogous to degree day):
    # np.choose picks the PFT-specific lookup value for each cell, which is
    # then scaled by the cell's radiation factor.
    grid['cell']['surface__potential_evapotranspiration_rate'] = (
        (np.choose(grid['cell']['vegetation__plant_functional_type'],
         PET_[Julian])) * Rad_Factor[Julian])
    grid['cell']['surface__potential_evapotranspiration_30day_mean'] = (
        (np.choose(grid['cell']['vegetation__plant_functional_type'],
         EP30[Julian])) * Rad_Factor[Julian])
    # Assign spatial rainfall data (uniform depth over all cells)
    grid['cell']['rainfall__daily_depth'] = P[i] * np.ones(
        grid.number_of_cells)
    # Update soil moisture component; SM.update advances current_time
    current_time = SM.update(current_time, Tr=Tr[i], Tb=Tb[i])
    # Decide whether its growing season or not; on DOY 364 the previous
    # PET_threshold value is carried over unchanged
    if Julian != 364:
        if EP30[Julian + 1, 0] > EP30[Julian, 0]:
            PET_threshold = 1
            # 1 corresponds to ETThresholdup (begin growing season)
        else:
            PET_threshold = 0
            # 0 corresponds to ETThresholddown (end growing season)
    # Update vegetation component
    VEG.update(PETthreshold_switch=PET_threshold, Tb=Tb[i], Tr=Tr[i])
    # Update yearly cumulative water stress data (Tb is in hours -> days)
    WS += (grid['cell']['vegetation__water_stress']) * Tb[i] / 24.
    # Record time (optional)
    Time[i] = current_time
    # Cellular Automata: run once per simulated year, whenever at least one
    # year has elapsed since the last CA update
    if (current_time - time_check) >= 1.:
        if yrs % 5 == 0:
            print('Elapsed time = {time} years'.format(time=yrs))
        VegType[yrs] = grid['cell']['vegetation__plant_functional_type']
        grid['cell']['vegetation__cumulative_water_stress'] = WS / Tg
        vegca.update()
        # Re-initialize soil moisture and vegetation for the new year
        SM.initialize()
        VEG.initialize()
        time_check = current_time
        WS = 0
        yrs += 1

# Record the PFT field one final time after the loop completes
VegType[yrs] = grid['cell']['vegetation__plant_functional_type']
```
Time_Consumed is an optional variable that gives information about computer running time
```
# time.clock() was removed in Python 3.8; time.perf_counter() is the
# recommended replacement for elapsed-time measurement.
Final_time = time.perf_counter()
Time_Consumed = (Final_time - Start_time) / 60.  # elapsed wall time in minutes
print('Time_consumed = {time} minutes'.format(time=Time_Consumed))
```
Save the outputs using ``numpy.save()``. These files have '.nc' extension, which can be loaded using ``numpy.load()``.
```
# # Saving
sim = 'VegCA_DEM_26Jul16_'
# Save_(sim, Tb, Tr, P, VegType, yrs, Time_Consumed, Time)
```
Lets look at outputs.
Plots of the cellular field of PFT at specified year step can be found below where:
GRASS = Green; SHRUB = Red; TREE = Black; BARE = White;
At the end, percentage cover for each PFT is plotted with respect to time.
```
Plot_(grid, VegType, yrs, yr_step=10)
```
#### If you run this model for around 900 years, you will observe patterns of PFTs. For example, you will find more trees on north facing slopes and mostly shrubs and grass on south facing slopes, as shown below:
```
from IPython.display import Image
Image(filename='presentation.png')
```
If you want to explore this model further, open 'Inputs_Vegetation_CA.txt' and change the input parameters (e.g., initial PFT distribution percentages, storm characteristics, etc..).
### Click here for more <a href="https://landlab.readthedocs.io/en/v2_dev/user_guide/tutorials.html">Landlab tutorials</a>
| github_jupyter |
# Building a brain object
Brain objects are supereeg's fundamental data structure for a single subject's ECoG data. To create one at minimum you'll need a matrix of neural recordings (time samples by electrodes), electrode locations, and a sample rate. Additionally, you can include information about separate recording sessions and store custom meta data. In this tutorial, we'll build a brain object from scratch and get familiar with some of the methods.
# Load in the required libraries
```
import supereeg as se
import numpy as np
import warnings
warnings.simplefilter("ignore")
%matplotlib inline
```
# Simulate some data
First, we'll use supereeg's built in simulation functions to simulate some data and electrodes. By default, the `simulate_data` function will return a 1000 samples by 10 electrodes matrix, but you can specify the number of time samples with `n_samples` and the number of electrodes with `n_elecs`. If you want further information on simulating data, check out the simulate tutorial!
```
# simulate some data
bo_data = se.simulate_bo(n_samples=1000, sessions=2, n_elecs=10)
# plot it
bo_data.plot_data()
# get just data
data = bo_data.get_data()
```
We'll also simulate some electrode locations
```
locs = se.simulate_locations()
print(locs)
```
# Creating a brain object
To construct a new brain objects, simply pass the data and locations to the `Brain` class like this:
```
bo = se.Brain(data=data, locs=locs, sample_rate=100)
```
To view a summary of the contents of the brain object, you can call the `info` function:
```
bo.info()
```
Optionally, you can pass a `sessions` parameter, which can be a numpy array or a list the length of your data with a unique identifier for each session. For example:
```
# Integer division (//) is required here: in Python 3, data.shape[0] / 2
# yields a float, and multiplying a list by a float raises TypeError.
sessions = np.array([1] * (data.shape[0] // 2) + [2] * (data.shape[0] // 2))
bo = se.Brain(data=data, locs=locs, sample_rate=1000, sessions=sessions)
bo.info()
```
You can also add custom meta data to the brain object to help keep track of its contents. `meta` is a dictionary comprised of whatever you want:
```
meta = {
'subjectID' : '123',
'Investigator' : 'Andy',
'Hospital' : 'DHMC'
}
bo = se.Brain(data=data, locs=locs, sample_rate=1000, sessions=sessions, meta=meta)
bo.info()
```
# Initialize brain objects
`Brain` objects can be initialized by passing any of the following to the `Brain` class instance initialization function:
- A path to a saved `Brain` object (ending in `.bo`)
- An existing `Brain` object (this creates a copy of the object)
- A path to or instance of any other supported toolbox type (`Model` objects or .mo files, or `Nifti` objects or .nii files)
In addition, `Brain` objects may be created via `load` by specifying `return_type='bo'`.
For example:
```
nii_bo = se.Brain('example_nifti')
```
Or:
```
nii_bo = se.load('example_nifti', return_type='bo')
```
Another feature, which can be particularly useful when working with large files, is loading only a subfield by specifying `field`. For example, if you only want to load locations:
```
bo_locs = se.load('example_data', field='locs')
```
# The structure of a brain object
Inside the brain object, the ECoG data are stored in a Pandas DataFrame that can be accessed with the `get_data` function:
```
bo.get_data().head()
```
Similarly, the electrode locations are stored as a Pandas DataFrame, and can be retrieved using the `get_locs` method:
```
bo.get_locs().head()
```
Brain objects are iterable, so you index a brain object like this:
```
#return first time sample
bo[0]
#return first 3 time samples
bo[:3]
#return first electrode
bo[:, 0]
#returns first 3 timesamples/elecs
bo_i = bo[:3, :3]
bo_i.get_data()
```
You can also pass a list of indices for either `times` (sample numbers) or `locs` to the `get_slice` method and return a subset of the brain object.
```
bo_s = bo.get_slice(sample_inds=[0,1,2], loc_inds=[0,1,2])
bo_s.get_data()
```
You can resample your data by specifying a new sample rate
```
bo.resample(64)
bo.info()
```
You can also plot both the data and the electrode locations:
```
bo.plot_data()
bo.plot_locs()
```
The other pieces of the brain object are listed below:
```
# array of session identifiers for each timepoint
sessions = bo.sessions
# number of sessions
n_sessions = bo.n_sessions
# sample rate
sample_rate = bo.sample_rate
# number of electrodes
n_elecs = bo.n_elecs
# length of each recording session in seconds
n_seconds = bo.dur
# the date and time that the bo was created
date_created = bo.date_created
# kurtosis of each electrode
kurtosis = bo.kurtosis
# meta data
meta = bo.meta
# label delineating observed and reconstructed locations
label = bo.label
```
# Brain object methods
There are a few other useful methods on a brain object
## `bo.info()`
This method will give you a summary of the brain object:
```
bo.info()
```
## `bo.apply_filter()`
This method will return a filtered copy of the brain object.
```
bo_f = bo.apply_filter()
```
## `bo.get_data()`
```
data_array = bo.get_data()
```
## `bo.get_zscore_data()`
This method will return a numpy array of the zscored data:
```
zdata_array = bo.get_zscore_data()
```
## `bo.get_locs()`
This method will return a numpy array of the electrode locations:
```
locs = bo.get_locs()
```
## `bo.get_slice()`
This method allows you to slice out time and locations from the brain object, and returns a brain object. This can occur in place if you set the flag `inplace=True`.
```
bo_slice = bo.get_slice(sample_inds=None, loc_inds=None, inplace=False)
```
## `bo.resample()`
This method allows you resample a brain object in place.
```
bo.resample(resample_rate=None)
```
## `bo.plot_data()`
This method normalizes and plots data from brain object:
```
bo.plot_data()
```
## `bo.plot_locs()`
This method plots electrode locations from brain object:
```
bo_f = se.load('example_filter')
bo_f.plot_locs()
```
## `bo.to_nii()`
This method converts the brain object into supereeg's `nifti` class (a subclass of the `nibabel` nifti class). If `filepath` is specified, the nifti file will be saved. You can also specify a nifti template with the `template` argument. If no template is specified, it will use the gray matter masked MNI 152 brain downsampled to 6mm.
```
# convert to nifti
nii = bo.to_nii(template='gray', vox_size=6)
# plot first timepoint
nii.plot_glass_brain()
# save the file
# nii = bo.to_nii(filepath='/path/to/file/brain')
# specify a template and resolution
# nii = bo.to_nii(template='/path/to/nifti/file.nii', vox_size=20)
```
## `bo.save(fname='something')`
This method will save the brain object to the specified file location.
The data will be saved as a 'bo' file, which is a dictionary containing the elements of a brain object saved in the hd5 format using `deepdish`.
```
#bo.save(fname='brain_object')
```
| github_jupyter |
# Example Sensor Data ML Notebook
Examples of humidity and temperature sensor telemetry Machine Learning (ML) using scikit-learn (https://scikit-learn.org/stable/)
_Prepared by: [Gary A. Stafford](https://twitter.com/GaryStafford)
Associated article: https://wp.me/p1RD28-6l6_
```
import os
import warnings
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymongo
from dotenv import load_dotenv
from matplotlib.colors import ListedColormap
from mlxtend.plotting import plot_decision_regions
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
warnings.filterwarnings('ignore')
%matplotlib inline
```
## Load Environment Variables and Establish MongoDB Atlas Connection
```
%%time
# load env vars from local .env file
load_dotenv()
# establish db connection
MONGODB_CONN = os.environ.get('MONGODB_CONN')
MONGODB_DB = os.environ.get('MONGODB_DB')
MONGODB_COL = os.environ.get('MONGODB_COL')
client = pymongo.MongoClient(MONGODB_CONN)
db = client[MONGODB_DB]
iot_data = db[MONGODB_COL]
```
## Define Pandas DataFrames for (3) DHT Sensors
Pandas DataFrames:
1. DataFrame 1: df1 = rb47a3d9f5
2. DataFrame 2: df2 = rp829c7e0e
3. DataFrame 3: df3 = rp47a3d9f5
```
DEVICE_1 = 'rp59adf374'
DEVICE_2 = 'rp829c7e0e'
DEVICE_3 = 'rp47a3d9f5'
```
### DataFrame 1
```
%%time
# aggregation created and exported from MongoDB Compass
# ~72-hour period from 5/11/2019 8:00:00 PM - 5/14/2019 8:00:00 PM
pipeline = [
{
'$match': {
'type': 'DHT22',
'device': DEVICE_1,
'timestamp': {
'$gt': 1557619200,
'$lt': 1557878400
}
}
}, {
'$project': {
'_id': 0,
'device': 1,
'temperature': '$data.temperature',
'humidity': '$data.humidity'
}
}
]
aggResult = iot_data.aggregate(pipeline)
df1 = pd.DataFrame(list(aggResult))
# convert celsius to fahrenheit
df1['temperature'] = ((df1.temperature * 1.8) + 32)
# attempt to remove outliers (sensor anomalies)
qt = df1['temperature'].quantile(0.01)
df1 = df1[df1['temperature'] >= qt]
qh = df1['humidity'].quantile(0.01)
df1 = df1[df1['humidity'] >= qh]
```
### DataFrame 2
```
%%time
# aggregation created and exported from MongoDB Compass
# ~72-hour period from 5/11/2019 8:00:00 PM - 5/14/2019 8:00:00 PM
pipeline = [
{
'$match': {
'type': 'DHT22',
'device': DEVICE_2,
'timestamp': {
'$gt': 1557619200,
'$lt': 1557878400
}
}
}, {
'$project': {
'_id': 0,
'device': 1,
'temperature': '$data.temperature',
'humidity': '$data.humidity'
}
}
]
aggResult = iot_data.aggregate(pipeline)
df2 = pd.DataFrame(list(aggResult))
# convert celsius to fahrenheit
df2['temperature'] = ((df2.temperature * 1.8) + 32)
# attempt to remove outliers (sensor anomalies)
qt = df2['temperature'].quantile(0.01)
df2 = df2[df2['temperature'] >= qt]
qh = df2['humidity'].quantile(0.01)
df2 = df2[df2['humidity'] >= qh]
```
### DataFrame 3
```
%%time
# aggregation created and exported from MongoDB Compass
# ~32-hour period
# 2019-05-17 21:58:04.746520996-04:00
# 2019-05-19 07:59:55.743566036-04:00
pipeline = [
{
'$match': {
'type': 'DHT22',
'device': DEVICE_3,
'timestamp': {
'$gt': 1558094400,
'$lt': 1558267200
}
}
}, {
'$project': {
'_id': 0,
'device': 1,
'temperature': '$data.temperature',
'humidity': '$data.humidity'
}
}
]
aggResult = iot_data.aggregate(pipeline)
df3 = pd.DataFrame(list(aggResult))
# convert celsius to fahrenheit
df3['temperature'] = ((df3.temperature * 1.8) + 32)
# attempt to remove outliers (sensor anomalies)
qt = df3['temperature'].quantile(0.01)
df3 = df3[df3['temperature'] >= qt]
qh = df3['humidity'].quantile(0.01)
df3 = df3[df3['humidity'] >= qh]
```
## Prepare Data for Modeling
```
# concat the (3) dataframes (different iot devices)
df_devices = [df1, df2, df3]
df_devices = pd.concat(df_devices, sort=False)
df_devices = df_devices.sample(frac=1)
# split dataframe into data and labels numpy.ndarray's
X = df_devices.iloc[:, [1,2]].values
y = df_devices.iloc[:, 0].values
# encode labels with value between 0 and n_classes-1
le = preprocessing.LabelEncoder()
le.fit(y)
y = le.transform(y)
print('df_devices:\n', df_devices.head(5), '\n')
print('X:\n', X[:5], '\n')
print('y:\n', y[:10], '\n')
df_devices.head(10)
# split data into training and testing data
test_size = 0.25
seed = 7
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
```
### Model/Data Plotting Function
```
# plot using mlxtend
def plot_model_mlxtend(clf, title):
    """Plot the fitted classifier's decision regions over the full dataset.

    Uses mlxtend's ``plot_decision_regions`` on the module-level ``X``/``y``
    arrays (humidity on the x-axis, temperature on the y-axis).
    """
    plt.figure(figsize=(14, 7))
    plot_decision_regions(X, y, clf=clf, legend=1)
    plt.title(title)
    plt.xlabel('Humidity (%)')
    plt.ylabel('Temperature (°F)')
    plt.legend(title="Classes")
    plt.show()
# plot using matplotlib.pyplot
def plot_model_np(clf, title):
    """Plot the decision surface of a fitted 2-feature classifier with matplotlib.

    Evaluates ``clf.predict`` on a dense mesh spanning the data range (the
    module-level ``X``/``y`` arrays) and shades each class region, then
    overlays the actual samples.

    Parameters
    ----------
    clf : fitted classifier exposing ``predict``
    title : str
        Plot title.
    """
    # create a mesh to plot in, padded one unit beyond the data range
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # Mesh step = 1% of the x-range. The original expression
    # (x_max / x_min) / 100 divides the bounds instead of subtracting them,
    # which yields an arbitrary step and an empty/invalid mesh when
    # x_min <= 0.
    h = (x_max - x_min) / 100
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Create color maps: light shades for regions, bold colors for samples
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
    fig, ax = plt.subplots(figsize=(14, 7))
    # Classify every mesh point, then reshape back to the mesh grid
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=cmap_light, alpha=0.7)
    scatter = ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlabel('Humidity (%)')
    plt.ylabel('Temperature (°F)')
    plt.xlim(xx.min(), xx.max())
    plt.title(title)
    legend = ax.legend(*scatter.legend_elements(), title="Classes")  # requires matplotlib >=3.1.0
    ax.add_artist(legend)
    plt.show()
```
### Computing Cross-Validation Metrics
<https://scikit-learn.org/stable/modules/cross_validation.html>
```
def cross_validation(model):
    """Return a formatted 5-fold cross-validation accuracy for *model*.

    Runs ``cross_val_score`` on the module-level ``X_test``/``y_test``
    split and reports mean accuracy with a ±2-standard-deviation band.
    (The original also called ``model.score(X_test, y_test)`` and discarded
    the result; that dead call is removed.)
    """
    scores = cross_val_score(model, X_test, y_test, cv=5)
    return "Model accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)
```
### Make Predictions using Model
```
def make_prediction(model, x, y):
    """Predict the device label for a single (humidity, temperature) sample.

    The numeric class returned by *model* is mapped back to its original
    device name via the module-level ``le`` LabelEncoder.
    """
    encoded_class = model.predict([[x, y]])
    return le.inverse_transform(encoded_class)[0]
```
### Saving/Loading Models
```
def save_restore_model(model):
    """Persist *model* with joblib, reload it, and smoke-test the copy.

    Dumps the model to ``joblib_model.pkl``, loads it back, and prints
    predictions from the *reloaded* model so the save/load round trip is
    actually exercised.
    """
    # now you can save it to a file
    joblib_file = "joblib_model.pkl"
    joblib.dump(model, joblib_file)
    # load from file
    joblib_model = joblib.load(joblib_file)
    # test the loaded model (the original code predicted with `model`,
    # which never verified the restored copy)
    print(make_prediction(joblib_model, 70, 65))
    print(make_prediction(joblib_model, 50, 70))
    print(make_prediction(joblib_model, 50, 65))
```
## Predictions using Support Vector Machine
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
References:
* <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>
* <https://www.analyticsvidhya.com/blog/2017/09/understaing-support-vector-machine-example-code>
* <https://scikit-learn.org/stable/modules/svm.html#kernel-functions>
```
%%time
# train C-Support Vector Classification (SVC) model and plot
title = 'SVC with linear kernel'
model = SVC(kernel='linear', C=1, gamma='auto')
model.fit(X_train, y_train)
plot_model_mlxtend(model, title)
plot_model_np(model, title)
# use linear SVC model
print(cross_validation(model))
print(make_prediction(model, 70, 65))
print(make_prediction(model, 50, 70))
print(make_prediction(model, 50, 65))
save_restore_model(model)
%%time
# train C-Support Vector Classification (SVC) model and plot
model = SVC(kernel='rbf', C=1, gamma='auto')
model.fit(X_train, y_train)
plot_model_mlxtend(model, 'SVC with rbf kernel')
# use rbf SVC model
print(cross_validation(model))
print(make_prediction(model, 70, 65))
print(make_prediction(model, 50, 70))
print(make_prediction(model, 50, 65))
```
## Predictions using K-Nearest Neighbors Classifier
* k-nearest neighbors algorithm (k-NN)
* <https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm>
```
%%time
# train k-NN model and plot
model = KNeighborsClassifier(n_neighbors=2, algorithm='auto')
model.fit(X_train, y_train)
plot_model_mlxtend(model, 'k-NN Classifier')
# use k-NN model
print(cross_validation(model))
print(make_prediction(model, 70, 65))
print(make_prediction(model, 50, 70))
print(make_prediction(model, 50, 65))
```
## Predictions using Random Forest Classifier
* Estimator that fits a number of decision tree classifiers on various sub-samples of the dataset
* Uses averaging to improve the predictive accuracy and control over-fitting
* <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>
```
%%time
# train Random Forest Classifier model and plot
model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
model.fit(X_train, y_train)
plot_model_mlxtend(model, 'Random Forest Classifier')
# use Random Forest Classifier model
print(cross_validation(model))
print(make_prediction(model, 70, 65))
print(make_prediction(model, 50, 70))
print(make_prediction(model, 50, 65))
```
| github_jupyter |
```
from os import path
import numpy as np
import matplotlib.pyplot as plt
from bentdna import shapefourier
from bentdna.find_haxis_curve import CurvePlusAgent
from bentdna.PDB import PDBReader, PDBWriter
workfolder = '/home/yizaochen/codes/dna_rna/length_effect/find_helical_axis'
```
### Part 0: Initialize
```
host = 'atat_21mer'
s_agent = shapefourier.ShapeAgent(workfolder, host)
```
### Part 1: Read $l_i$ and $\theta$
```
s_agent.read_l_modulus_theta()
```
### Part 2: $\theta(s)$ from MD, Frame-ID: 1
```
frame_id = 8000 # Ad hoc
df_filter = s_agent.get_filter_df(frame_id)
lbfz = 14
s_list, theta_list = s_agent.get_slist_thetalist(frame_id)
s_mid_list, interpolation_list = s_agent.get_smid_and_interpolation_theta(frame_id)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(16,4))
ax.plot(s_list, theta_list, '-o', label=r'$\theta(\mathrm{s})$')
ax.set_xlabel("s", fontsize=lbfz)
ax.set_ylabel(r"$\theta(s)$ (radian)", fontsize=lbfz)
title = f'Frame-ID: {frame_id}'
ax.set_title(title, fontsize=16)
ax.set_xticks(s_list)
ax.plot(s_mid_list, interpolation_list, 'o', color='red', label=r'$\theta(s_{\mathrm{mid}})$')
ax.legend(fontsize=14)
#plt.savefig("fromMD.png", dpi=100)
plt.show()
```
### Part 3: Drawing Decomposition
```
d_agent = shapefourier.DecomposeDraw(workfolder, host)
frame_id = 8000
figsize = (10, 14)
fig, axes = d_agent.plot_decompose_by_frame_id(frame_id, figsize)
plt.tight_layout()
#plt.savefig('decompose.svg')
plt.show()
```
### Part 4: VMD to draw illustration
```
pdb_folder = '/home/yizaochen/Documents/elite_final/images/Lp_explain/pdb_files'
heavy_folder = '/home/yizaochen/codes/dna_rna/all_systems/atat_21mer/bdna+bdna/input/heavyatoms'
avgpdb = path.join(heavy_folder, 'bdna+bdna.nohydrogen.avg.pdb')
fitavg_dcd = path.join(heavy_folder, 'bdna+bdna.nohydrogen.fitavg.dcd')
def change_resid_to_modi(pdb_in, pdb_out):
    """Write a copy of *pdb_in* with chain-B residue IDs offset by the strand length.

    Residues on segid 'B' get ``n_bp`` added to their resid so the two
    strands of the 21-bp duplex carry distinct residue numbers.
    """
    n_bp = 21  # base pairs per strand
    atom_group = PDBReader(pdb_in, segid_exist=True).get_atomgroup()
    for atom in atom_group:
        if atom.segid == 'B':
            atom.resid = atom.resid + n_bp
    PDBWriter(pdb_out, atom_group).write_pdb()
cmd = f'vmd -pdb {avgpdb} {fitavg_dcd}'
print(cmd)
frame_id = 5362
cmd = f'animate write pdb {frame_id}.pdb beg {frame_id} end {frame_id}'
print(cmd)
frame_id = 1428
cmd = f'animate write pdb {frame_id}.pdb beg {frame_id} end {frame_id}'
print(cmd)
frame_id = 0
pdb_in = path.join(pdb_folder, f'{frame_id}.pdb')
pdb_out = path.join(pdb_folder, f'{frame_id}.modi.pdb')
change_resid_to_modi(pdb_in, pdb_out)
work_folder = path.join(pdb_folder, 'curve_work_dir')
n_bp = 21
frame_id = 0
lis_name = 'r+'
pdb_haxis_folder = work_folder
pdb_h_smooth_folder = work_folder
c_agent = CurvePlusAgent(pdb_out, workfolder, n_bp, lis_name, frame_id, pdb_haxis_folder, pdb_h_smooth_folder)
c_agent.print_exectue_curve_plus_cmd()
for frame_id in [0, 5362, 1428]:
print(f'mol new {frame_id}.modi.pdb type pdb')
print(f'mol new {frame_id}.smooth.pdb type pdb')
resid 4 to 18 or resid 25 to 39
serial 601 to 3600
```
| github_jupyter |
```
import tfcomb
C = tfcomb.CombObj(verbosity=0)
C.TFBS_from_motifs(regions="/mnt/workspace_stud/stud4/WP6_data/right-lobe-of-liver.11.bed",
motifs="/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/testdaten/JASPAR2020_CORE_vertebrates.meme",
genome="/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/testdaten/homo_sapiens.104.mainChr.fa",
threads=8)
```
#### running count_within() with stranded option turned on
```
C.count_within(stranded=True, threads=8)
C.market_basket()
```
### Analyze preferential orientation of motifs
##### First, we create a directionality analysis for the rules found:
```
df_orientation = C.analyze_orientation()
df_orientation.head()
```
We can subset these on pvalue and number of sites:
```
orientation_selected = df_orientation[(df_orientation["pvalue"] < 0.01)
& (df_orientation["TF1_TF2_count"] > 50)]
#Number of TF pairs with significant differences in orientation
orientation_selected.shape[0]
```
### Visualization of orientation preference
```
orientation_heatmap=orientation_selected.plot_heatmap()
```
We can select the subsets by investigating the selected pairs:
```
#orientation_selected.sort_values("same").tail(5)
#orientation_selected.sort_values("opposite").tail(5)
```
### Extended analysis with directional=True
#### The first analysis presented does not take into account the relative order of TF1-TF2, e.g. whether the orientation "same" represents "TF1-TF2" or "TF2-TF1".
```
C.count_within(directional=True, stranded=True, threads=8)
C.market_basket()
df_orientation = C.analyze_orientation()
df_orientation.head()
```
similarly to the first analysis, we can select the significant pairs and visualize the preferences for orientation:
```
orientation_selected = df_orientation[(df_orientation["pvalue"] < 0.05)
& (df_orientation["TF1_TF2_count"] > 50)]
orientation_selected.shape[0]
orientation_heatmap = orientation_selected.plot_heatmap()
```
### In-depth look at preferential orientation
By sorting the selected co-occurring TF pairs, it is also possible to visualize the top pairs within each scenario as seen below.
#### TFs specific in TF1-TF2 orientation
```
orientation_selected.sort_values("TF1-TF2", ascending=False).head(10)
```
#### TFs specific in TF2-TF1 orientation
```
orientation_selected.sort_values("TF2-TF1", ascending=False).head(10)
```
#### TFs specific in convergent orientation
```
orientation_selected.sort_values("convergent", ascending=False).head(10)
```
#### TFs specific in divergent orientation
```
orientation_selected.sort_values("divergent", ascending=False).head(10)
```
#### .loc-operator to show the results of a subset of TF1-TF2-pairs:
```
#df_orientation.loc[["EGR1-MYOD1","SP1-SP1","WT1-ZFP82"]]
# Collect the significant (TF1, TF2) pairs as plain tuples for rule selection.
TF_pairs =list(orientation_selected[["TF1","TF2"]].itertuples(index=False, name=None))
len(TF_pairs)
#selection=cobj.select_custom_rules(names)
#selection=df.select_custom_rules(names)
#selection=C.select_custom_rules(names)
#selection=selected.select_custom_rules(names)
from tfcomb import CombObj
genome_path="/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/testdaten/homo_sapiens.104.mainChr.fa"
motif_path="/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/testdaten/JASPAR2020_CORE_vertebrates.meme"
result_path="/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/analyse/results/wp2/main/"
# Restrict a pre-computed liver CombObj to the orientation-significant pairs
# and analyse their preferred binding-site distances.
Liver_object = CombObj().from_pickle(f"{result_path}right-lobe-of-liver.11.pkl")
TF_pairs_of_orientation_for_distance=Liver_object.select_custom_rules(TF_pairs)
TF_pairs_of_orientation_for_distance.rules
TF_pairs_of_orientation_for_distance.analyze_distances(threads=6)
TF_pairs_of_orientation_for_distance.distObj.evaluate_noise(threads=6)
TF_pairs_of_orientation_for_distance.distObj.rank_rules()
TF_pairs_of_orientation_for_distance.distObj.peaks
# Join the distance peaks with the orientation table on the TF pair.
df_orientation_distance=TF_pairs_of_orientation_for_distance.distObj.peaks.merge(orientation_selected, left_on =["TF1","TF2"], right_on = ["TF1","TF2"])
# Keep only clear distance peaks (peak height > 2.8).
df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)]
#df_orientation_distance
# Scatter plots: peak distance vs. each orientation-preference score.
plot_distance_T1_TF2=df_orientation_distance.plot(x ='Distance', y='TF1-TF2', kind = 'scatter')
plot_distance_T1_TF2=df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(x ='Distance', y='TF1-TF2', kind = 'scatter', c='Peak Heights', colormap='jet')
plot_distance_T2_TF1=df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(x ='Distance', y='TF2-TF1', kind = 'scatter',c='Peak Heights', colormap='jet')
plot_distance_convergent=df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(x ='Distance', y='convergent', kind = 'scatter',c='Peak Heights', colormap='jet')
plot_distance_divergent=df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(x ='Distance', y='divergent', kind = 'scatter',c='Peak Heights', colormap='jet')
#plot_distance=df_orientation_distance.plot(x = ['Distance','Distance','Distance','Distance'], y=['divergent','convergent','TF1-TF2','TF2-TF1'],kind='scatter')
# Overlay all four orientation scores on a single axes object (ax1).
ax1 = df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(kind='scatter', x='Distance', y='TF1-TF2', color='r')
ax2 = df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(kind='scatter', x='Distance', y='TF2-TF1', color='g', ax=ax1)
ax3 = df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(kind='scatter', x='Distance', y='divergent', color='b', ax=ax1)
ax4 = df_orientation_distance[(df_orientation_distance["Peak Heights"]>2.8)].plot(kind='scatter', x='Distance', y='convergent', color='y', ax=ax1)
ax1.set_ylabel("orientation")
#print(ax1 == ax2 == ax3 == ax4)
```
#### How many of the top 50 of each orientation have a preferential distance
```
# For each orientation, take the 50 pairs with the strongest preference
# (restricted to pairs with a clear distance peak, height > 2.8) and plot
# peak distance vs. peak height, coloured by the orientation score.
df_top50_T1_TF2 = df_orientation_distance[(df_orientation_distance["Peak Heights"] > 2.8)].sort_values(by=['TF1-TF2'], ascending=False).head(50)
plot_top50_distance_T1_TF2 = df_top50_T1_TF2.plot(x='Distance', y='Peak Heights', kind='scatter', c='TF1-TF2', colormap='jet')
df_top50_T2_TF1 = df_orientation_distance[(df_orientation_distance["Peak Heights"] > 2.8)].sort_values(by=['TF2-TF1'], ascending=False).head(50)
plot_top50_distance_T2_TF1 = df_top50_T2_TF1.plot(x='Distance', y='Peak Heights', kind='scatter', c='TF2-TF1', colormap='jet')
df_top50_convergent = df_orientation_distance[(df_orientation_distance["Peak Heights"] > 2.8)].sort_values(by=['convergent'], ascending=False).head(50)
# BUG FIX: this plot previously used df_top50_T1_TF2 instead of the
# convergent subset computed on the line above.
plot_top50_distance_convergent = df_top50_convergent.plot(x='Distance', y='Peak Heights', kind='scatter', c='convergent', colormap='jet')
df_top50_divergent = df_orientation_distance[(df_orientation_distance["Peak Heights"] > 2.8)].sort_values(by=['divergent'], ascending=False).head(50)
plot_top50_distance_divergent = df_top50_divergent.plot(x='Distance', y='Peak Heights', kind='scatter', c='divergent', colormap='jet')
```
| github_jupyter |
```
%matplotlib inline
import math,sys,os,numpy as np
from numpy.linalg import norm
from PIL import Image
from matplotlib import pyplot as plt, rcParams, rc
from scipy.ndimage import imread
from skimage.measure import block_reduce
import cPickle as pickle
from scipy.ndimage.filters import correlate, convolve
from ipywidgets import interact, interactive, fixed
from ipywidgets.widgets import *
rc('animation', html='html5')
rcParams['figure.figsize'] = 3, 6
%precision 4
np.set_printoptions(precision=4, linewidth=100)
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
images, labels = mnist.train.images, mnist.train.labels
images = images.reshape((55000,28,28))
np.savez_compressed("MNIST_data/train", images=images, labels=labels)
"""
1
def plots(ims, interp=False, titles=None):
    """Display a row of images side by side on a shared intensity scale.

    Parameters
    ----------
    ims : array-like
        Sequence of 2-D images; converted to a numpy array.
    interp : bool
        If False (default), disable interpolation so pixels stay visible.
    titles : sequence or None
        Optional per-image subplot titles.
    """
    ims = np.array(ims)
    # Shared vmin/vmax so the images are directly comparable.
    mn, mx = ims.min(), ims.max()
    f = plt.figure(figsize=(12, 24))
    for i in range(len(ims)):
        sp = f.add_subplot(1, len(ims), i + 1)
        if titles is not None:  # idiomatic form of "not titles is None"
            sp.set_title(titles[i], fontsize=18)
        plt.imshow(ims[i], interpolation=None if interp else 'none', vmin=mn, vmax=mx)
def plot(im, interp=False):
    """Display a single image in greyscale; no interpolation unless `interp`."""
    f = plt.figure(figsize=(3,6), frameon=True)
    plt.imshow(im, interpolation=None if interp else 'none')
    plt.gray()
    # NOTE(review): closes the figure immediately after drawing -- with the
    # notebook inline backend the figure still renders, but confirm this is
    # intended if running outside a notebook.
    plt.close()
data = np.load("MNIST_data/train.npz")
images=data['images']
labels=data['labels']
n=len(images)
images.shape
plot(images[0])
labels[0]
plots(images[:5], titles=labels[:5])
top=[[-1,-1,-1],
[ 1, 1, 1],
[ 0, 0, 0]]
plot(top)
r=(0,28)
def zoomim(x1=0,x2=28,y1=0,y2=28):
plot(images[0,y1:y2,x1:x2])
w=interactive(zoomim, x1=r,x2=r,y1=r,y2=r)
w
k=w.kwargs
dims = np.index_exp[k['y1']:k['y2']:1,k['x1']:k['x2']]
images[0][dims]
corrtop = correlate(images[0], top)
corrtop[dims]
plot(corrtop[dims])
plot(corrtop)
np.rot90(top, 1)
convtop = convolve(images[0], np.rot90(top,2))
plot(convtop)
np.allclose(convtop, corrtop)
straights=[np.rot90(top,i) for i in range(4)]
plots(straights)
br=[[ 0, 0, 1],
[ 0, 1,-1.5],
[ 1,-1.5, 0]]
diags = [np.rot90(br,i) for i in range(4)]
plots(diags)
rots = straights + diags
corrs = [correlate(images[0], rot) for rot in rots]
plots(corrs)
def pool(im): return block_reduce(im, (7,7), np.max)
plots([pool(im) for im in corrs])
# Python 3 fix: xrange was removed; range is its lazy equivalent.
eights = [images[i] for i in range(n) if labels[i] == 8]
ones = [images[i] for i in range(n) if labels[i] == 1]
plots(eights[:5])
plots(ones[:5])
pool8 = [np.array([pool(correlate(im, rot)) for im in eights]) for rot in rots]
len(pool8), pool8[0].shape
plots(pool8[0][0:5])
def normalize(arr): return (arr-arr.mean())/arr.std()
filts8 = np.array([ims.mean(axis=0) for ims in pool8])
filts8 = normalize(filts8)
plots(filts8)
pool1 = [np.array([pool(correlate(im, rot)) for im in ones]) for rot in rots]
filts1 = np.array([ims.mean(axis=0) for ims in pool1])
filts1 = normalize(filts1)
plots(filts1)
def pool_corr(im): return np.array([pool(correlate(im, rot)) for rot in rots])
plots(pool_corr(eights[0]))
def sse(a,b): return ((a-b)**2).sum()
def is8_n2(im): return 1 if sse(pool_corr(im),filts1) > sse(pool_corr(im),filts8) else 0
sse(pool_corr(eights[0]), filts8), sse(pool_corr(eights[0]), filts1)
[np.array([is8_n2(im) for im in ims]).sum() for ims in [eights,ones]]
[np.array([(1-is8_n2(im)) for im in ims]).sum() for ims in [eights,ones]]
def n1(a,b): return (np.fabs(a-b)).sum()
def is8_n1(im): return 1 if n1(pool_corr(im),filts1) > n1(pool_corr(im),filts8) else 0
[np.array([is8_n1(im) for im in ims]).sum() for ims in [eights,ones]]
[np.array([(1-is8_n1(im)) for im in ims]).sum() for ims in [eights,ones]]
```
| github_jupyter |
# NREL MIDC station network
The [Measurement and Instrumentation Data Center (MIDC)](https://midcdmz.nrel.gov/) is operated by NREL and provides irradiance and meteorological data from a number of ground stations in the U.S. The stations vary in quality, with some stations measuring all three components with high-quality instruments and other stations featuring a rotating shadow band pyranometer.
The most notable station is the [Baseline Measurement System (BMS)](https://midcdmz.nrel.gov/apps/sitehome.pl?site=BMS) at NREL's [Solar Radiation Research Laboratory (SRRL)](https://www.nrel.gov/esif/solar-radiation-research-laboratory.html) outside of Denver, Colorado. The BMS features the world's largest collection of operating pyranometers and pyrheliometers. A number of sky imagers, PV reference cells, and spectral radiometers are also located at the site. Instruments at the BMS are cleaned each weekday and frequently calibrated. Thus, due to the large collection of co-located and well maintained instruments, the BMS data is ideal for comparing different types of instruments.
Note, the MIDC includes several inactive stations. Also, several of the active stations are no longer cleaned or calibrated frequently. For these reasons, the SolarStations listing only includes the SRRL BMS, SOLARTAC, and Flatirons M2 sites, as these measure all three irradiance components and are active. See the map below for the locations of the stations.
```
import pandas as pd
from itables import init_notebook_mode, show
from IPython.display import HTML
init_notebook_mode(all_interactive=True)
# MIDC API endpoint that returns the full station listing as CSV.
stations_midc_url = 'https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist_'
stations = pd.read_csv(stations_midc_url)
# Rename the raw API column names to friendlier display names.
stations = stations.rename(columns={
'STATION_ID': 'Station Identifier',
'STATION_FULLNAME': 'Station full name',
'STATION_SHORTNAME': 'Station Abbreviation',
'LATITUDE_DEG': 'Latitude',
'LONGITUDE_DEG': 'Longitude',
'ELEVATION_M': 'Elevation',
'ACTIVE': 'Active'})
del stations['RESERVED']  # internal column, not useful for display
show(stations)
import folium
from folium import plugins
EsriImagery = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}"
EsriAttribution = "Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community"
# Create Folium map
m = folium.Map(
location=[35, -107],
zoom_start=3, min_zoom=1, max_bounds=True,
control_scale=True, # Adds distance scale in lower left corner
tiles='openstreetmap',
)
# Function for determining station color
def marker_color(row):
    """Return the map-marker colour for a station row (green = active)."""
    return 'green' if row['Active'] == 1 else '#FA8072'
# SRRL, STAC, and UoE have multiple stations sharing the same latitude and
# longitude; append the main stations at the end so their markers are drawn
# on top of the duplicates.
# FIX: DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
main_station_ids = ['UOSMRL', 'BMS', 'STAC']
stations_to_plot = pd.concat(
    [stations]
    + [stations[stations['Station Identifier'] == sid] for sid in main_station_ids])
# Add each station to the map
for index, row in stations_to_plot.iterrows():
color = marker_color(row)
folium.CircleMarker(
location=[row['Latitude'], row['Longitude']],
popup=f"{row['Station full name']} ({row['Station Identifier']})",
tooltip=f"{row['Station full name']} ({row['Station Identifier']})",
radius=5, color=color,
fill_color=color, fill=True).add_to(m)
folium.raster_layers.TileLayer(EsriImagery, name='World imagery', attr=EsriAttribution).add_to(m)
folium.LayerControl(position='topright').add_to(m)
# Additional options and plugins
plugins.Fullscreen(position='bottomright').add_to(m) # Add full screen button to map
folium.LatLngPopup().add_to(m) # Show latitude/longitude when clicking on the map
# Create legend
legend_html = """
<div style="position:fixed;
top: 10px;
left: 10px;
width: 120px;
height: 68px;
border:2px solid grey;
z-index: 9999;
background-color:#f2efe9;
font-size:14px;">
<b>Station markers</b><br>
<i class="fa fa-circle-o fa-1x" style="color:green"></i> Active<br>
<i class="fa fa-circle-o fa-1x" style="color:red"></i> Inactive<br>
</div>"""
m.get_root().html.add_child(folium.Element(legend_html)) # Add Legend to map
# Show the map
m
```
## Data retrieval
Data from the MIDC can be retrieved from the MIDC website or using the [MIDC raw data API](https://midcdmz.nrel.gov/apps/data_api_doc.pl).
```{admonition} Note
If you use data from the MIDC in any publication, make sure to cite it. As an example, the citation for the BMS site is:
Andreas, A.; Stoffel, T.; (1981). NREL Solar Radiation Research Laboratory (SRRL): Baseline
Measurement System (BMS); Golden, Colorado (Data); NREL Report No. DA-5500-56488.
http://dx.doi.org/10.5439/1052221
```
Conveniently, the [pvlib-python](https://pvlib-python.readthedocs.io/en/stable/generated/pvlib.iotools.read_midc_raw_data_from_nrel.html#pvlib.iotools.read_midc_raw_data_from_nrel) library features a wrapper around the API making retrieving data a breeze. The use of the function is shown below, demonstrating how to retrieve five days of data from the BMS:
```
import pvlib
data = pvlib.iotools.read_midc_raw_data_from_nrel(
site='BMS', # station identifier
start=pd.Timestamp(2020,6,1),
end=pd.Timestamp(2020,6,5))
data.iloc[:5, 5:10] # show a subset of the data
```
<br>
The retrieved BMS dataset contains numerous instruments measuring the same irradiance component. Let's, for example, compare the global horizontal irradiance (GHI) measured by a high-quality CMP22 pyranometer with that of a low-cost CM3 pyranometer:
```
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=2, figsize=(10,4))
# plot both measurement as a time-series
data[['Global CMP22 (vent/cor) [W/m^2]', 'Global CM3 (cor) [W/m^2]']].plot(
ax=axes[0], alpha=0.8, ylim=[-20, 1500])
# compare measurements with a scatter plot
data.plot.scatter(ax=axes[1], s=1, grid=True,
x='Global CMP22 (vent/cor) [W/m^2]',
y='Global CM3 (cor) [W/m^2]',
xlim=[-20, 1300], ylim=[-20, 1300])
fig.tight_layout()
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
```
# Experimenting with ``slfractals``
Some initialisations:
```
import slfractals as slf
from matplotlib import pyplot as plt
from multiprocessing import Pool
from time import time
xlim = (-2, 1.)
ylim = (-0.9, 0.9)
```
## Generating a grid
```
C1 = slf.get_grid(xlim, ylim, resw=10)
# display(C1)
C2 = slf.get_grid(xlim, ylim, resw=10, ratio="28:16")
# display(C2)
C = slf.get_grid(xlim, ylim, resw=500)
```
## Computing
```
comp = slf.Compute(slf.mandel, max_value=2, max_iter=300)
```
All at once:
```
start = time()
grad, _ = comp(C.flatten())
grad1 = grad.reshape(C.shape)
print("Elapsed time: {}s".format(time() - start))
%matplotlib inline
plt.imshow(grad1)
```
Split and join, serially:
```
start = time()
spl = slf.split(C.flatten(), nportions=4)
grads, comps2 = zip(*map(comp, spl))
grad2 = slf.join_shape(grads, *C.shape)
print("Elapsed time: {}s".format(time() - start))
%matplotlib inline
plt.imshow(grad2)
```
Split and join, with ``Pool``:
```
start = time()
with Pool(2) as p:
res = p.map(comp, slf.split(C.flatten(), nportions=4))
grads, comps3 = zip(*res)
grad3 = slf.join_shape(grads, *C.shape)
print("Elapsed time: {}s".format(time() - start))
%matplotlib inline
plt.imshow(grad3)
```
Generating a new compute object for every batch:
```
start = time()
with Pool(2) as p:
res = p.map(slf.Compute(slf.mandel, max_value=5, max_iter=300), slf.split(C.flatten(), nportions=4))
grads, comps4 = zip(*res)
grad4 = slf.join_shape(grads, *C.shape)
print("Elapsed time: {}s".format(time() - start))
%matplotlib inline
plt.imshow(grad4)
```
* To compute in parallel pool, the compute object is pickled and thus copied to each separate thread, resulting in a different memory id compared to the original (``comp3``)
* If we create a new compute object directly in ``Pool.map``, then of course we have another id from the beginning. (``comp4``)
```
print("Original: {}".format(id(comp)))
print("comps2 : {}".format(list(map(id, comps2))))
print("comps3 : {}".format(list(map(id, comps3))))
print("comps4 : {}".format(list(map(id, comps4))))
```
Functions to compute serially or in parallel:
```
start = time()
grad_serial = slf.serial_compute(
slf.mandel,
C,
max_iter=300,
max_value=5,
colorexp=2
)
print("Serial time: {}s".format(time() - start))
start = time()
grad_parallel = slf.parallel_compute(
slf.mandel,
C,
max_iter=300,
max_value=5,
colorexp=2,
nproc=2
)
print("Parallel time: {}s".format(time() - start))
%matplotlib inline
ax1 = plt.subplot(121)
ax1.imshow(grad_serial)
ax2 = plt.subplot(122)
ax2.imshow(grad_parallel)
```
## Plotting with Bokeh
```
from bokeh.io import show, output_notebook, push_notebook
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.palettes import inferno
output_notebook()
from slfractals.colors import ColorGradient
cg = ColorGradient(["#000000", "#ff0000", "00ff00"])
p = figure(
title="fractal",
plot_width=grad_parallel.shape[1],
plot_height=grad_parallel.shape[0],
x_range=xlim,
y_range=ylim
)
cds = ColumnDataSource(data={"image": [0.1*grad_parallel]})
img = p.image(image="image", source=cds, x=xlim[0], y=ylim[0], dw=xlim[1]-xlim[0], dh=ylim[1]-ylim[0], palette=cg(100))
nh = show(p, notebook_handle=True)
p = figure(
title="fractal",
plot_width=grad_parallel.shape[1],
plot_height=grad_parallel.shape[0],
x_range=xlim,
y_range=ylim
)
cds = ColumnDataSource(
data={
"image": [grad_parallel],
"x": [xlim[0]],
"y": [ylim[0]],
"dw": [xlim[1]-xlim[0]],
"dh": [ylim[1]-ylim[0]]
}
)
img = p.image(image="image", source=cds, x="x", y="y", dw="dw", dh="dh", palette=inferno(100))
show(p)
push_notebook(handle=nh)
cds.data["image"] = [0.0*grad_parallel]
cds.data = {"image": [0.0*grad_parallel]}
p.plot_width
from bokeh.palettes import Blues256, inferno
C.shape
```
| github_jupyter |
# <center> HW 2: Reproducibility <center>
<br>
<center> Ryan Guajardo and Swetha Pillai <center>
# <center> Brief Overview<center>
### <center> Can we define cell state just by looking at MYH6/7? <center>
Workflow...
1. We have some cells.
2. We want to characterize how gene expression is related to organization within the cell.
- Cells tend towards organization as they mature, D18 vs. D32
3. We classify the organization of a cell with a linear model, Ridge Regression.
- Combined Organizational Score: COS, 11 features, 1-5.
- Ground truth being 2 experts manually deciding cell organization.
4. We then look at MYH6/7, attempt to make a connection to cell organization.
<center> <img src="img2.png" width='300'/> </center>
# <center> Our Results <center>
#### <center>72 Manuscript Figures<center>
<center> <img src="fig_2f_heatmap_rankby_Fraction cell area organized z-disks.png" width='700'/> </center>
In the previous slide, the figure shows the quantitative feature map and different characteristics of the cell.
<center> <img src="fig_3g_regular_stripes_vs_global_alignment.png" width='1000'/> </center>
In the previous slide, the legend is inaccurate and does not resemble the one from the paper.
<center> <img src="fig_4b_linear_model_feature_importances.png" width='1200'/> </center>
In the previous slide, the figure shows the weight coefficients of the regression model for the each feature. From studying this further, we learned about the multicollinearity of ridge regression. Multiple models are trained to combat the multicollinear data where linearly independent features are strongly correlated. This is why there are multiple coefficients per feature.
<center> <img src="fig4d_linear_model_distplot_grouped_by_human_score.png" width='1000'/> </center>
In the previous slide, the legend is cut off and does not resemble the one from the paper.
<center> <img src="supp_fig2i_expert_by_day18_32.png" width='1200'/> </center>
In the previous slide, the figure shows a good representation of the distribution of the data, in which we are able to see the cell counts per score for each cell group age. The score with respect to this paper is the level of cell organization.
<center> <img src="supp_fig4c_linear_model_pred_vs_true.png" width='1000'/> </center>
In the previous slide, the figure shows the ground truth label against the predicted label for each cell age group. Ideally, we would want to see most points along the diagonal of this plot, indicating that the predicted label matches the ground truth label and a correct inference was made. This scatterplot isn't necessarily the best visualization for seeing the correctness of the model. Rather a confusion matrix is better at showing this.
## <center> Irreproducible Figures / Workflow <center>
<!-- ### <center>1. Workflow Related <center>
### <center>2. Irreproducible Figures <center> -->
## <center>Workflow A-C <center>
For this to be considered reproducible we would ideally be able to pass new images into this pipeline and get cell contours, extract cell features, and get predictions from their code.
<br>
All of the code to accomplish this workflow is provided, however understanding the way that code functions is not.
<center> <img src="paper1.jpg" width='800'/> </center>
## <center>Workflow D-G<center>
<center> <img src="paper2.jpg" width='1000'/> </center>
## <center>Figures Consisting of Multiple Overlaid Images<center>
Quilt package contains 10 images in one tiff file.
<br>
It wasn’t immediately clear to me how they were able to assemble these images into the figures we see here.
<br>
Could be my lack of python knowledge.
<center> <img src="img1.png" width='500'/> </center>
In the previous slide, this workflow or end-to-end pipeline would have been beneficial to see. Almost all of what we were able to reproduce were plots of the data itself. However, a major part of this paper was the machine learning task. The training, scoring, segmenting, and predicting of the model was a crucial part and was what we were most interested in. Had we been able to understand this, we would be able to pass in some data and produce predictions. None of the published code allows us to accomplish this. We tried manipulating the files to achieve this, to no avail.
# <center> Drawbacks <center>
- poor documentation
- no docstrings or comments -> super confusing
- many nested files and functions -> too much back and forth
- can get plots for provided dataframes only.
- no cell images, contours, COS + contours, MYH6/7
- no legend for some images
- have file path but can't read in single image from Quilt
- corrupted images?
- each image is actually 10 images, what kind of preprocessing is required to use them?
- how to predict on new data
- dataframes already filled with predicted labels
We found .tiff files that show cell images stitched together. Although they lack labels, legends, or any sort of documentation, it was cool to see. We tried to retrieve the .tiff files for a long time via Quilt, but it seems like Quilt only supports full package imports and not individual images, which is very annoying.
# <center> Time Spent <center>
- reading the paper: 2-3 hours
- sifting through files: 1-2 hours
- running the notebook to produce plots: 30 min
- further exploration + reproducing non-manuscript "cell image" figures: 8 hours
# <center> Conclusion <center>
- very easy to produce the plots from the notebook
- could not reproduce cell images
- could not reproduce segmentation images
- could not carry out model predictions
Rating: 6/10 :(
<br>
<br>
...i.e. hard to reproduce
- could have file outlining ML workflow
- could have file showing how to produce cell images
- could have a proper README/documentation
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import glob
import nibabel as nib
import os
import time
import pandas as pd
import numpy as np
import cv2
from skimage.transform import resize
from mricode.utils import log_textfile, createPath, data_generator
from mricode.utils import copy_colab
from mricode.utils import return_iter
from mricode.utils import return_csv
from mricode.config import config
from mricode.models.DenseNet_normal_hierach import MyDenseNet
import tensorflow as tf
from tensorflow.keras.layers import Conv3D
from tensorflow import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.utils import conv_utils
tf.__version__
tf.test.is_gpu_available()
path_output = './output/'
path_tfrecords = '/data2/res64/down/'
path_csv = '/data2/csv/'
filename_res = {'train': 'intell_residual_train.csv', 'val': 'intell_residual_valid.csv', 'test': 'intell_residual_test.csv'}
filename_final = filename_res
sample_size = 'site16_allimages'
batch_size = 8
onlyt1 = False
Model = MyDenseNet
versionkey = 'down64' #down256, cropped128, cropped64, down64
modelname = 'new_hiearch_densenet_allimages_' + versionkey
createPath(path_output + modelname)
train_df, val_df, test_df, norm_dict = return_csv(path_csv, filename_final, False)
train_iter = config[versionkey]['iter_train']
val_iter = config[versionkey]['iter_val']
test_iter = config[versionkey]['iter_test']
t1_mean = config[versionkey]['norm']['t1'][0]
t1_std= config[versionkey]['norm']['t1'][1]
t2_mean=config[versionkey]['norm']['t2'][0]
t2_std=config[versionkey]['norm']['t2'][1]
ad_mean=config[versionkey]['norm']['ad'][0]
ad_std=config[versionkey]['norm']['ad'][1]
fa_mean=config[versionkey]['norm']['fa'][0]
fa_std=config[versionkey]['norm']['fa'][1]
md_mean=config[versionkey]['norm']['md'][0]
md_std=config[versionkey]['norm']['md'][1]
rd_mean=config[versionkey]['norm']['rd'][0]
rd_std=config[versionkey]['norm']['rd'][1]
norm_dict
cat_cols = {'female': 2, 'race.ethnicity': 5, 'high.educ_group': 4, 'income_group': 8, 'married': 6}
num_cols = [x for x in list(val_df.columns) if '_norm' in x]
def calc_loss_acc(out_loss, out_acc, y_true, y_pred, cat_cols, num_cols, norm_dict):
    """Accumulate per-target loss/accuracy sums for one batch.

    Numerical targets contribute summed squared error to ``out_loss`` and, as
    their "accuracy", the squared error rescaled back to original units via
    the std stored in ``norm_dict``.  Categorical targets contribute summed
    sparse cross-entropy and the count of correct argmax predictions.  The
    running totals are accumulated in-place (keyed by column name) and the
    two dicts are returned.
    """
    for col in num_cols:
        std = norm_dict[col.replace('_norm', '')]['std']
        true_vals = tf.cast(y_true[col], tf.float32).numpy()
        pred_vals = np.squeeze(y_pred[col].numpy())
        sq_err = np.sum(np.square(true_vals - pred_vals))
        # dict.get with a default replaces the repeated "if key not in dict"
        # initialise-or-add pattern of the original.
        out_loss[col] = out_loss.get(col, 0.0) + sq_err
        # De-normalised squared error (original units) is tracked as "acc".
        out_acc[col] = out_acc.get(col, 0.0) + np.sum(np.square((true_vals - pred_vals) * std))
    for col in cat_cols:
        xent = tf.keras.losses.SparseCategoricalCrossentropy()(
            tf.squeeze(y_true[col]), tf.squeeze(y_pred[col])).numpy()
        out_loss[col] = out_loss.get(col, 0.0) + xent
        correct = tf.reduce_sum(tf.dtypes.cast(
            (y_true[col] == tf.argmax(y_pred[col], axis=-1)), tf.float32)).numpy()
        out_acc[col] = out_acc.get(col, 0.0) + correct
    return (out_loss, out_acc)
def format_output(out_loss, out_acc, n, cols, print_bl=False):
    """Average the accumulated loss/acc sums over *n* samples.

    Returns (total averaged loss, total averaged acc, DataFrame) where the
    DataFrame has one row per column in *cols* with fields
    ['name', 'loss', 'acc'].  Optionally prints the table.
    """
    rows = [[col, out_loss[col] / n, out_acc[col] / n] for col in cols]
    df = pd.DataFrame(rows, columns=['name', 'loss', 'acc'])
    loss = df['loss'].sum()
    acc = df['acc'].sum()
    if print_bl:
        print(df)
    return (loss, acc, df)
@tf.function
def train_step(X, y, model, optimizer, cat_cols, num_cols):
    """One training step: forward pass, combined loss across all target
    heads, backward pass and optimizer update.

    Returns (ground truth, predictions, loss).
    """
    with tf.GradientTape() as tape:
        predictions = model(X)
        i = 0
        # Summed MSE across all numerical (regression) heads...
        loss = tf.keras.losses.MSE(tf.cast(y[num_cols[i]], tf.float32), tf.squeeze(predictions[num_cols[i]]))
        for i in range(1,len(num_cols)):
            loss += tf.keras.losses.MSE(tf.cast(y[num_cols[i]], tf.float32), tf.squeeze(predictions[num_cols[i]]))
        # ...plus sparse cross-entropy across all categorical heads.
        for col in list(cat_cols.keys()):
            loss += tf.keras.losses.SparseCategoricalCrossentropy()(tf.squeeze(y[col]), tf.squeeze(predictions[col]))
    gradients = tape.gradient(loss, model.trainable_variables)
    mean_std = [x.name for x in model.non_trainable_variables if ('batch_norm') in x.name and ('mean' in x.name or 'variance' in x.name)]
    # NOTE(review): mean_std holds variable *names* (strings), not ops or
    # tensors; tf.control_dependencies over strings most likely has no effect
    # here -- confirm the intent (presumably to force batch-norm statistic
    # updates before applying the weight update).
    with tf.control_dependencies(mean_std):
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return(y, predictions, loss)
@tf.function
def test_step(X, y, model):
    """Evaluation step: compute predictions without recording gradients."""
    return (y, model(X))
def epoch(data_iter, df, model, optimizer, cat_cols, num_cols, norm_dict):
    """Run one full pass over ``data_iter``.

    Trains when ``optimizer`` is given, otherwise only evaluates.  Relies on
    module-level normalisation constants (t1_mean, t1_std, t2_mean, t2_std,
    ...), ``decoder``, ``path_output`` and ``modelname``.

    Returns (out_loss, out_acc, n_samples, time_in_model, time_in_dataload).
    """
    out_loss = {}
    out_acc = {}
    n = 0.
    n_batch = 0.
    total_time_dataload = 0.
    total_time_model = 0.
    start_time = time.time()
    for batch in data_iter:
        total_time_dataload += time.time() - start_time
        start_time = time.time()
        # Normalise the two structural modalities with precomputed stats.
        t1 = (tf.cast(batch['t1'], tf.float32)-t1_mean)/t1_std
        t2 = (batch['t2']-t2_mean)/t2_std
        # Diffusion modalities (ad/fa/md/rd) are currently disabled; the
        # dead branch is kept for easy re-enabling.
        if False:
            ad = batch['ad']
            ad = tf.where(tf.math.is_nan(ad), tf.zeros_like(ad), ad)
            ad = (ad-ad_mean)/ad_std
            fa = batch['fa']
            fa = tf.where(tf.math.is_nan(fa), tf.zeros_like(fa), fa)
            fa = (fa-fa_mean)/fa_std
            md = batch['md']
            md = tf.where(tf.math.is_nan(md), tf.zeros_like(md), md)
            md = (md-md_mean)/md_std
            rd = batch['rd']
            rd = tf.where(tf.math.is_nan(rd), tf.zeros_like(rd), rd)
            rd = (rd-rd_mean)/rd_std
        # Decode byte subject ids and look up their target labels.
        subjectid = decoder(batch['subjectid'])
        y = get_labels(df, subjectid, list(cat_cols.keys())+num_cols)
        #X = tf.concat([t1], axis=4)
        # Stack modalities along the channel axis.
        X = tf.concat([t1, t2], axis=4)
        if optimizer != None:
            y_true, y_pred, loss = train_step(X, y, model, optimizer, cat_cols, num_cols)
        else:
            y_true, y_pred = test_step(X, y, model)
        out_loss, out_acc = calc_loss_acc(out_loss, out_acc, y_true, y_pred, cat_cols, num_cols, norm_dict)
        n += X.shape[0]
        n_batch += 1
        # Progress heartbeat every 10 batches.
        if (n_batch % 10) == 0:
            log_textfile(path_output + modelname + '/log' + '.log', str(n_batch))
        total_time_model += time.time() - start_time
        start_time = time.time()
    return (out_loss, out_acc, n, total_time_model, total_time_dataload)
def get_labels(df, subjectid, cols=('nihtbx_fluidcomp_uncorrected_norm',)):
    """Look up target columns for a batch of subject ids, preserving order.

    A left merge against ``df`` keeps the ordering of ``subjectid`` and
    yields NaN for subjects missing from ``df``.

    FIX: the default for ``cols`` is now a tuple instead of a list to avoid
    the shared mutable-default-argument pitfall (same values, so callers are
    unaffected).
    """
    subjects_df = pd.DataFrame(subjectid)
    result_df = pd.merge(subjects_df, df, left_on=0, right_on='subjectkey', how='left')
    return {col: np.asarray(result_df[col].values) for col in cols}
def best_val(df_best, df_val, df_test, e):
    """Update the per-target best-so-far table with epoch *e* results.

    For every target row, tracks the epoch, validation value and test value
    at (a) the best validation loss and (b) the best validation "acc".
    "acc" is maximised for the categorical targets and minimised for the
    numerical ones (where it is the de-normalised squared error).
    """
    # Attach this epoch's validation/test results to the running table.
    df_best = pd.merge(df_best, df_val, how='left', left_on='name', right_on='name')
    df_best = pd.merge(df_best, df_test, how='left', left_on='name', right_on='name')
    # Record the epoch whenever the validation loss improves (<=).
    df_best.loc[df_best['best_loss_val']>=df_best['cur_loss_val'], 'best_loss_epochs'] = e
    # Categorical targets: accuracy improves when it increases.
    df_best.loc[(df_best['best_acc_val']<=df_best['cur_acc_val'])&(df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'best_acc_epochs'] = e
    # On a validation-loss improvement, snapshot the matching test loss first,
    # then the validation loss itself (order matters for the comparisons).
    df_best.loc[df_best['best_loss_val']>=df_best['cur_loss_val'], 'best_loss_test'] = df_best.loc[df_best['best_loss_val']>=df_best['cur_loss_val'], 'cur_loss_test']
    df_best.loc[df_best['best_loss_val']>=df_best['cur_loss_val'], 'best_loss_val'] = df_best.loc[df_best['best_loss_val']>=df_best['cur_loss_val'], 'cur_loss_val']
    # Categorical targets: higher validation acc is better.
    df_best.loc[(df_best['best_acc_val']<=df_best['cur_acc_val'])&(df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'best_acc_test'] = df_best.loc[(df_best['best_acc_val']<=df_best['cur_acc_val'])&(df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'cur_acc_test']
    df_best.loc[(df_best['best_acc_val']<=df_best['cur_acc_val'])&(df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'best_acc_val'] = df_best.loc[(df_best['best_acc_val']<=df_best['cur_acc_val'])&(df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'cur_acc_val']
    # Numerical targets: lower validation "acc" (de-normalised error) is better.
    df_best.loc[(df_best['best_acc_val']>=df_best['cur_acc_val'])&(~df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'best_acc_test'] = df_best.loc[(df_best['best_acc_val']>=df_best['cur_acc_val'])&(~df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'cur_acc_test']
    df_best.loc[(df_best['best_acc_val']>=df_best['cur_acc_val'])&(~df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'best_acc_val'] = df_best.loc[(df_best['best_acc_val']>=df_best['cur_acc_val'])&(~df_best['name'].isin(['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married'])), 'cur_acc_val']
    # Drop the per-epoch scratch columns before returning.
    df_best = df_best.drop(['cur_loss_val', 'cur_acc_val', 'cur_loss_test', 'cur_acc_test'], axis=1)
    return(df_best)
# Element-wise decoder for arrays of byte strings (e.g. subject ids read
# back from TFRecords).
def _to_str(raw):
    return raw.decode('UTF-8')

decoder = np.vectorize(_to_str)

# Per-epoch progress line used by the training loop below.
template = 'Epoch {0}, Loss: {1:.3f}, Accuracy: {2:.3f}, Val Loss: {3:.3f}, Val Accuracy: {4:.3f}, Time Model: {5:.3f}, Time Data: {6:.3f}'
# Single-iteration dummy loop: a notebook idiom that wraps the whole cell in
# one indented block; `col` itself is never used.
for col in [0]:
    # Record the column configuration for this run.
    log_textfile(path_output + modelname + '/log' + '.log', cat_cols)
    log_textfile(path_output + modelname + '/log' + '.log', num_cols)
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    optimizer = tf.keras.optimizers.Adam(lr = 0.001)
    model = Model(cat_cols, num_cols)
    # Running table of the best validation/test metrics seen so far.
    df_best = None
    for e in range(20):
        log_textfile(path_output + modelname + '/log' + '.log', 'Epochs: ' + str(e))
        # Per-epoch metric accumulators (overwritten by format_output below).
        loss = tf.Variable(0.)
        acc = tf.Variable(0.)
        val_loss = tf.Variable(0.)
        val_acc = tf.Variable(0.)
        test_loss = tf.Variable(0.)
        test_acc = tf.Variable(0.)
        # Train with batch-norm/dropout in training mode ...
        tf.keras.backend.set_learning_phase(True)
        train_out_loss, train_out_acc, n, time_model, time_data = epoch(train_iter, train_df, model, optimizer, cat_cols, num_cols, norm_dict)
        # ... then evaluate; passing optimizer=None means no weight updates.
        tf.keras.backend.set_learning_phase(False)
        val_out_loss, val_out_acc, n, _, _ = epoch(val_iter, val_df, model, None, cat_cols, num_cols, norm_dict)
        test_out_loss, test_out_acc, n, _, _ = epoch(test_iter, test_df, model, None, cat_cols, num_cols, norm_dict)
        loss, acc, _ = format_output(train_out_loss, train_out_acc, n, list(cat_cols.keys())+num_cols)
        val_loss, val_acc, df_val = format_output(val_out_loss, val_out_acc, n, list(cat_cols.keys())+num_cols, print_bl=False)
        test_loss, test_acc, df_test = format_output(test_out_loss, test_out_acc, n, list(cat_cols.keys())+num_cols, print_bl=False)
        df_val.columns = ['name', 'cur_loss_val', 'cur_acc_val']
        df_test.columns = ['name', 'cur_loss_test', 'cur_acc_test']
        if e == 0:
            # Initialise the best-metrics table from the first epoch's results.
            df_best = pd.merge(df_test, df_val, how='left', left_on='name', right_on='name')
            df_best['best_acc_epochs'] = 0
            df_best['best_loss_epochs'] = 0
            df_best.columns = ['name', 'best_loss_test', 'best_acc_test', 'best_loss_val', 'best_acc_val', 'best_acc_epochs', 'best_loss_epochs']
        df_best = best_val(df_best, df_val, df_test, e)
        print(df_best[['name', 'best_loss_test', 'best_acc_test']])
        print(df_best[['name', 'best_loss_val', 'best_acc_val']])
        log_textfile(path_output + modelname + '/log' + '.log', template.format(e, loss, acc, val_loss, val_acc, time_model, time_data))
        # Step-wise learning-rate decay (divide by 3) at epochs 10 and 15.
        if e in [10, 15]:
            optimizer.lr = optimizer.lr/3
            log_textfile(path_output + modelname + '/log' + '.log', 'Learning rate: ' + str(optimizer.lr))
        # Persist the metrics table every epoch: one per-epoch snapshot plus
        # a "latest" copy that is overwritten each time.
        df_best.to_csv(path_output + modelname + '/df_best' + str(e) + '.csv')
        df_best.to_csv(path_output + modelname + '/df_best' + '.csv')
        #model.save_weights(path_output + modelname + '/checkpoints/' + str(e) + '/')
# Deliberate NameError: halts "Run All" so the exploratory cells below are
# not executed automatically -- TODO confirm this is intentional.
error
test_loss, test_acc, df_test = format_output(test_out_loss, test_out_acc, n, list(cat_cols.keys())+num_cols, print_bl=False)
df_test.to_csv('final_output_all.csv')
inputs = tf.keras.Input(shape=(64,64,64,2), name='inputlayer123')
a = model(inputs)['female']
mm = tf.keras.models.Model(inputs=inputs, outputs=a)
from tf_explain.core.smoothgrad import SmoothGrad
import pickle
explainer = SmoothGrad()
output_grid = {}
output_n = {}
for i in range(2):
output_grid[i] = np.zeros((64,64,64))
output_n[i] = 0
counter = 0
for batch in test_iter:
counter+=1
print(counter)
t1 = (tf.cast(batch['t1'], tf.float32)-t1_mean)/t1_std
t2 = (batch['t2']-t2_mean)/t2_std
X = tf.concat([t1, t2], axis=4)
subjectid = decoder(batch['subjectid'])
y = get_labels(test_df, subjectid, list(cat_cols.keys())+num_cols)
y_list = list(y['female'])
for i in range(X.shape[0]):
X_i = X[i]
X_i = tf.expand_dims(X_i, axis=0)
y_i = y_list[i]
grid = explainer.explain((X_i, _), mm, y_i, 20, 1.)
output_grid[y_i] += grid
output_n[y_i] += 1
pickle.dump([output_grid, output_n], open( "smoothgrad_female_all.p", "wb" ) )
#output_grid, output_n = pickle.load(open( "smoothgrad_female.p", "rb" ))
def apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size):
    """Return a copy of *image* with one cubic region zeroed out.

    Used for occlusion-sensitivity analysis: the model is re-evaluated on
    copies of the input in which one patch at a time has been blanked.

    Args:
        image: 4-D input volume (x, y, z, channels); numpy array or tensor.
        top_left_x (int): X origin of the occluded cube.
        top_left_y (int): Y origin of the occluded cube.
        top_left_z (int): Z origin of the occluded cube.
        patch_size (int): Edge length of the occluded cube.

    Returns:
        numpy.ndarray: A fresh array; the original input is left untouched.
    """
    occluded = np.array(image, copy=True)
    span_x = slice(top_left_x, top_left_x + patch_size)
    span_y = slice(top_left_y, top_left_y + patch_size)
    span_z = slice(top_left_z, top_left_z + patch_size)
    occluded[span_x, span_y, span_z, :] = 0
    return occluded
import math
def get_sensgrid(image, mm, class_index, patch_size):
    """Build an occlusion-sensitivity heatmap for one 3-D input volume.

    Slides a cube of side ``patch_size`` over the volume, blanks it via
    ``apply_grey_patch``, and records how much the model's confidence in
    ``class_index`` drops for each occluded position.

    Args:
        image: 4-D input volume (x, y, z, channels).
        mm: Keras model whose ``predict`` returns per-class scores per input.
        class_index (int): Output class whose sensitivity is mapped.
        patch_size (int): Edge length of the occlusion cube.

    Returns:
        A 64x64x64 heatmap rescaled to [0, 1].
    """
    # One cell per patch position along each axis.
    sensitivity_map = np.zeros((
        math.ceil(image.shape[0] / patch_size),
        math.ceil(image.shape[1] / patch_size),
        math.ceil(image.shape[2] / patch_size)
    ))
    # Outer loop over z keeps each predict() call to one z-slab of patches.
    for index_z, top_left_z in enumerate(range(0, image.shape[2], patch_size)):
        # All occluded copies for this z-slab; x-major order, y varies fastest.
        patches = [
            apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size)
            for index_x, top_left_x in enumerate(range(0, image.shape[0], patch_size))
            for index_y, top_left_y in enumerate(range(0, image.shape[1], patch_size))
        ]
        # (y, x) index pairs generated in the same order as `patches`.
        coordinates = [
            (index_y, index_x)
            for index_x, _ in enumerate(range(0, image.shape[0], patch_size))
            for index_y, _ in enumerate(range(0, image.shape[1], patch_size))
        ]
        predictions = mm.predict(np.array(patches), batch_size=1)
        # Model confidence in the target class for each occluded copy.
        target_class_predictions = [prediction[class_index] for prediction in predictions]
        # High value (1 - confidence) where occluding the patch hurts most.
        for (index_y, index_x), confidence in zip(coordinates, target_class_predictions):
            sensitivity_map[index_y, index_x, index_z] = 1 - confidence
    # Upsample back to input resolution; `resize` is presumably
    # skimage.transform.resize imported elsewhere -- TODO confirm.
    sm = resize(sensitivity_map, (64,64,64))
    # Min-max normalise to [0, 1]; NOTE(review): divides by zero if the map
    # is constant -- confirm inputs always produce some variation.
    heatmap = (sm - np.min(sm)) / (sm.max() - sm.min())
    return(heatmap)
output_grid = {}
output_n = {}
for i in range(2):
output_grid[i] = np.zeros((64,64,64))
output_n[i] = 0
counter = 0
for batch in test_iter:
counter+=1
print(counter)
t1 = (tf.cast(batch['t1'], tf.float32)-t1_mean)/t1_std
t2 = (batch['t2']-t2_mean)/t2_std
X = tf.concat([t1, t2], axis=4)
subjectid = decoder(batch['subjectid'])
y = get_labels(test_df, subjectid, list(cat_cols.keys())+num_cols)
y_list = list(y['female'])
for i in range(X.shape[0]):
print(i)
X_i = X[i]
y_i = y_list[i]
grid = get_sensgrid(X_i, mm, y_i, 4)
output_grid[y_i] += grid
output_n[y_i] += 1
if counter==6:
break
pickle.dump([output_grid, output_n], open( "heatmap_female_all.p", "wb" ) )
error
batch = next(iter(train_iter))
t1 = (tf.cast(batch['t1'], tf.float32)-t1_mean)/t1_std
t2 = (batch['t2']-t2_mean)/t2_std
ad = batch['ad']
ad = tf.where(tf.math.is_nan(ad), tf.zeros_like(ad), ad)
ad = (ad-ad_mean)/ad_std
fa = batch['fa']
fa = tf.where(tf.math.is_nan(fa), tf.zeros_like(fa), fa)
fa = (fa-fa_mean)/fa_std
md = batch['md']
md = tf.where(tf.math.is_nan(md), tf.zeros_like(md), md)
md = (md-md_mean)/md_std
rd = batch['rd']
rd = tf.where(tf.math.is_nan(rd), tf.zeros_like(rd), rd)
rd = (rd-rd_mean)/rd_std
#subjectid = decoder(batch['subjectid'])
#y = get_labels(df, subjectid, list(cat_cols.keys())+num_cols)
#X = tf.concat([t1, t2, ad, fa, md, rd], axis=4)
X = tf.concat([t1, t2], axis=4)
tf.keras.backend.set_learning_phase(True)
model(X)['female']
tf.keras.backend.set_learning_phase(False)
model(X)['female']
mean_std = [x.name for x in model.non_trainable_variables if ('batch_norm') in x.name and ('mean' in x.name or 'variance' in x.name)]
model = Model(cat_cols, num_cols)
model.non_trainable_variables
```
| github_jupyter |
<a href="https://colab.research.google.com/github/martin-fabbri/colab-notebooks/blob/master/gnn/nb2_code_challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Before you start: don't forget to change your runtime
You might need to change the run time to "GPU" in order to run your code successfully (under Runtime -> Change runtime type).
### Code Challenge Question
Using the `pubmed_dataset_student.h5` that has been randomly split into train, val and test, modify the existing `GAT` architecture to get 80% accuracy. Bonus points for anyone that manages to get over 80% accuracy!
HINT: Experiment with different types of activation functions, neighbourhood sizes and convolutional layers.
```
!pip install -q torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
!pip install -q torch-sparse -f https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
!pip install -q torch-geometric
!pip install git+https://github.com/Aggregate-Intellect/tutorial-notebook-utils.git
from ai_notebook_utils import *
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import SGConv, GATConv
seed=99
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
!wget https://ai-notebook-public-assets.s3.amazonaws.com/graphnn/codechallenge/nb2/pubmed_dataset_student.h5
data_pubmed = torch.load('pubmed_dataset_student.h5')
num_classes = len(data_pubmed.y.unique())
data_pubmed.num_nodes
def compute_accuracy(model, data, mask):
    """Fraction of correctly classified nodes among those selected by *mask*.

    Args:
        model: Module mapping *data* to per-node log-probabilities.
        data: Graph data object with ground-truth labels in ``data.y``.
        mask: Boolean node mask selecting the split to score.

    Returns:
        float: Accuracy over the masked nodes.
    """
    # Disable dropout etc. for deterministic evaluation.
    model.eval()
    log_probs = model(data)
    predicted = log_probs[mask].max(1)[1]
    actual = data.y[mask]
    correct = predicted.eq(actual).sum()
    return (correct / mask.sum().float()).item()
def predict(model, data):
    """Return predicted class labels for the test nodes as a 1-D numpy array.

    Args:
        model: Trained module mapping *data* to per-node log-probabilities.
        data: Graph data object with a boolean ``test_mask`` attribute.

    Returns:
        numpy.ndarray: Predicted class index for each node in the test mask.
    """
    #acc_test = compute_accuracy(model, data, data.test_mask)
    logprob = model(data)
    y_pred = logprob[data.test_mask].max(1)[1]
    # .cpu() is a no-op on CPU tensors, so this handles both devices without
    # branching on the global torch.cuda.is_available() (which can disagree
    # with the tensor's actual device); .detach() makes .numpy() safe even
    # when gradients were tracked.
    return y_pred.detach().cpu().numpy().reshape(-1,)
# TODO: complete your code here
class GATNet(torch.nn.Module):
    """Graph attention network for PubMed node classification.

    Intentionally unimplemented: this class is the code-challenge exercise.
    Students fill in the GAT layers (e.g. GATConv stacks) in ``__init__``
    and the message-passing computation in ``forward``.
    """
    def __init__(self, data, heads_layer1,
                 heads_layer2, dropout, dropout_alphas):
        # TODO: build the attention layers here (see the challenge hint about
        # activation functions, neighbourhood sizes and convolutional layers).
        pass
    def forward(self, data):
        # TODO: return per-node log-probabilities (e.g. F.log_softmax(...)).
        pass
def train(model, data, optimizer):
    """Run one optimisation step on the nodes selected by data.train_mask."""
    model.train()          # enable training-mode behaviour (dropout etc.)
    optimizer.zero_grad()  # clear gradients left over from the previous step
    # Forward pass: per-node log-probabilities.
    out = model(data)
    # Score only the training nodes; val/test nodes never contribute to the
    # loss, which is what makes this transductive split meaningful.
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()        # populate parameter gradients
    optimizer.step()       # apply the update
@torch.no_grad()  # evaluation only -- no gradient tracking needed
def test(model, data):
    """Return (train, val, test) accuracies for the current model state."""
    splits = (data.train_mask, data.val_mask, data.test_mask)
    acc_train, acc_val, acc_test = (compute_accuracy(model, data, m) for m in splits)
    return acc_train, acc_val, acc_test
# Use the GPU when available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_pubmed= data_pubmed.to(device)
# TODO: pass in necessary parameter for GATNet
# NOTE(review): GATNet.__init__ requires (data, heads_layer1, heads_layer2,
# dropout, dropout_alphas); this zero-argument call will fail until the
# student supplies them.
model_pubmed_gat = GATNet().to(device)
optimizer = torch.optim.Adam(model_pubmed_gat.parameters(), lr=0.005, weight_decay=1e-3)
# Train for 200 epochs, reporting split accuracies every 10th epoch.
for epoch in range(1, 201):
    train(model_pubmed_gat, data_pubmed, optimizer)
    if epoch %10 ==0:
        log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test {:.4f}'
        train_acc, val_acc, test_acc = test(model_pubmed_gat,data_pubmed)
        #print(train_acc, val_acc, test_acc)
        print(log.format(epoch, train_acc, val_acc, test_acc))
# save model test results and send it off to AISC for evaluation
answer = predict(model_pubmed_gat, data_pubmed)
score_answer("graphnn-2020::nb2-codechallenge1::pubmed", answer)
```
| github_jupyter |
# Clean up
❗ This notebook removes all resources created by a data transformation and ingestion project. The following code cells will:
- permanently delete project or projects you provisioned in your Studio environment
- permanently delete feature group or groups
- permanently delete project-provisioned S3 buckets
- permanently delete objects in S3 buckets under project-related prefixes
<div class="alert alert-info"> 💡 <strong> This notebook will delete resources in your AWS account. Please double check the names of resources to be deleted! </strong>
</div>
```
import sagemaker
import boto3
import time
import json
import os
# load environment variables from %store
%store -r
%store
try:
s3_data_prefix
s3_flow_prefix
s3_input_data_prefix
s3_fs_query_output_prefix
except NameError:
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("[ERROR] YOU HAVE TO RUN 00-setup.ipynb and 01-feature-store-ingest-pipeline notebooks")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
sm = boto3.client("sagemaker")
s3 = boto3.resource('s3')
```
## Delete projects
```
# Get all projects created by the current domain
projects = [
{"ProjectName":p["ProjectName"], "ProjectId":p["ProjectId"]} for p in sm.list_projects(MaxResults=100, SortBy="CreationTime")["ProjectSummaryList"]
if sm.describe_project(ProjectName=p["ProjectName"])["CreatedBy"]["DomainId"] == domain_id and p["ProjectStatus"] == "CreateCompleted"
]
print(f"These projects have been created by domain {domain_id}: {json.dumps(projects, indent=2)}")
# Select projects to be deleted
projects_to_delete = []
for p in projects:
print(f"Are you sure you want to delete this project: {p['ProjectName']}? (y/n)")
choice = input()
if choice == 'y':
projects_to_delete.append(p)
print(f"***************************************")
print(f"The following projects will be deleted:\n{json.dumps(projects_to_delete, indent=2)}")
print(f"***************************************")
```
<div class="alert alert-info"> 💡 <strong> The following code cell will delete all selected projects. All project CodeCommit code repositories and CI/CD CodePipeline pipelines will be deleted! </strong>
</div>
```
for p in projects_to_delete:
try:
print(f"Deleting project {p['ProjectName']}:{sm.delete_project(ProjectName=p['ProjectName'])}")
except Exception:
pass
```
## Delete feature groups
```
feature_groups = sm.list_feature_groups(
FeatureGroupStatusEquals="Created",
SortOrder="Descending",
SortBy="CreationTime"
)["FeatureGroupSummaries"]
feature_groups
# Select feature groups to be deleted
feature_groups_to_delete = []
for fg in feature_groups:
print(f"Are you sure you want to delete this feature group: {fg['FeatureGroupName']}? (y/n)")
choice = input()
if choice == 'y':
feature_groups_to_delete.append(fg["FeatureGroupName"])
print(f"********************************************")
print(f"The following feature groups will be deleted:\n{json.dumps(feature_groups_to_delete, indent=2)}")
print(f"********************************************")
def delete_offline_store(feature_group_name: str):
    """Delete all S3 objects backing a feature group's offline store.

    Prompts for interactive confirmation before removing anything. Relies on
    the notebook-global SageMaker client ``sm`` and on IPython's ``!`` shell
    escape, so it only runs inside a Jupyter session with AWS CLI configured.
    """
    try:
        offline_store_config = sm.describe_feature_group(FeatureGroupName=feature_group_name)['OfflineStoreConfig']
    except Exception:
        # Either the describe call failed or the group has no
        # 'OfflineStoreConfig' key: nothing to clean up for this group.
        print(f'Feature group: {feature_group_name} does NOT have an offline store!')
        return
    offline_store_s3_uri = offline_store_config['S3StorageConfig']['ResolvedOutputS3Uri']
    print(f"all feature store objects under {offline_store_s3_uri} will be deleted!")
    # NOTE(review): prompt text is missing the word "delete".
    print("Are you sure you want to these objects ? (y/n)")
    choice = input()
    if choice == 'y':
        # IPython shell escape: recursive delete, no dry run, irreversible.
        !aws s3 rm {offline_store_s3_uri} --recursive
```
<div class="alert alert-info"> 💡 <strong> The following code cell will delete the selected feature groups!</strong>
</div>
```
for fg in feature_groups_to_delete:
print(f"Deleting the feature group: {fg}")
delete_offline_store(fg)
sm.delete_feature_group(FeatureGroupName=fg)
```
## Delete project-provisioned S3 buckets
<div class="alert alert-info"> 💡 <strong> The following code cell will delete all S3 buckets created by a project!</strong>
</div>
```
print(f"*****************************************************")
print(f"The following S3 buckets will be removed permanently!")
print(f"*****************************************************")
for p in projects_to_delete:
print(f"sagemaker-cp-{p['ProjectName']}-{p['ProjectId']}")
print(f"sagemaker-ct-{p['ProjectName']}-{p['ProjectId']}")
for p in projects_to_delete:
!aws s3 rb s3://sagemaker-cp-{p['ProjectName']}-{p['ProjectId']} --force
!aws s3 rb s3://sagemaker-ct-{p['ProjectName']}-{p['ProjectId']} --force
```
## Remove project-related objects from S3 data bucket
<div class="alert alert-info"> 💡 <strong> The following code cells will delete all objects under specified S3 prefixes!</strong>
</div>
```
prefixes_to_delete = [
s3_data_prefix,
s3_flow_prefix,
s3_input_data_prefix,
s3_fs_query_output_prefix
]
print(f"************************************************************************")
print(f"All objects under the following S3 prefixes will be removed permanently!")
print(f"************************************************************************")
for p in prefixes_to_delete:
print(p)
for p in prefixes_to_delete:
!aws s3 rm s3://{p} --recursive
```
# Release resources
```
%%html
<p><b>Shutting down your kernel for this notebook to release resources.</b></p>
<button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
<script>
try {
els = document.getElementsByClassName("sm-command-button");
els[0].click();
}
catch(err) {
// NoOp
}
</script>
```
Continue with clean up as documented in [README](../README.md#clean-up).
| github_jupyter |
```
import matplotlib.pyplot as plt
def plot_lr_and_accuracy(history, conf):
    """Plot the LR schedule (optional) and train/validation accuracy & loss.

    Args:
        history (dict): Keras-style history mapping metric names to per-epoch
            lists; expects 'sparse_categorical_accuracy',
            'val_sparse_categorical_accuracy', 'loss', 'val_loss' and, when
            conf['decay_rate'] > 0, 'lr'.
        conf (dict): Must provide 'decay_rate' and 'log_dir' (directory
            where the PDF figures are written).

    Side effects:
        Writes 'learning_rate.pdf' (only with a decay schedule) and
        'accuracy_and_loss.pdf' under conf['log_dir'], then shows the plots.
    """
    import seaborn as sns
    sns.set()
    # Consistent font sizing across both figures.
    SMALL_SIZE = 12
    MEDIUM_SIZE = 14
    BIGGER_SIZE = 16
    plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
    plt.rc('axes', titlesize=BIGGER_SIZE)    # fontsize of the axes title
    plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
    plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
    plt.rc('legend', fontsize=MEDIUM_SIZE)   # legend fontsize
    plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
    acc = history['sparse_categorical_accuracy']
    val_acc = history['val_sparse_categorical_accuracy']
    loss = history['loss']
    val_loss = history['val_loss']
    # Derive the epoch axis from the recorded history instead of hard-coding
    # range(60): shorter or longer runs previously raised a length-mismatch
    # error in plt.plot.
    epochs_range = range(len(acc))
    if conf["decay_rate"] > 0:
        lr = history['lr']
        # Plot the learning rate
        plt.figure(figsize=(8, 6))
        plt.plot(epochs_range, lr, label='Learning Rate')
        plt.xlabel('Epoch')
        plt.ylabel('Learning rate')
        plt.title('Learning Rate development during training');
        plt.tight_layout()
        plt.savefig(conf["log_dir"]+'/learning_rate.pdf', format='pdf')
    # Plot train-val accuracy and loss
    plt.figure(figsize=(14, 6))
    # Subplot 1: accuracy curves
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylim([0, 1.01])
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('Training and Validation Accuracy')
    # Subplot 2: loss curves
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylim([-0.01, 3])
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')
    plt.tight_layout()
    plt.savefig(conf["log_dir"]+'/accuracy_and_loss.pdf', format='pdf')
    plt.show()
no_schedule_conf = {
"log_dir": ".",
"decay_rate": 0
}
experiment_directory = "/home/henriklg/master-thesis/code/hyper-kvasir/experiments/misc_old/overfit_example"
no_schedule_history = {
"loss": [1.5072544227386344, 0.7790612218708828, 0.6250192923792477, 0.5121105169427806, 0.47008189970049363, 0.42635839740777837, 0.394188962106047, 0.37841374606921757, 0.3476650296100255, 0.3259111303707649, 0.29389441655627613, 0.30275844063224466, 0.26665452842054693, 0.2635612076726453, 0.23953651431305656, 0.22976859890181442, 0.20154377445578575, 0.1805611290037632, 0.1766955211501697, 0.16294705379625846, 0.14493887010833312, 0.12392903363396382, 0.12449626207094767, 0.12019846106654611, 0.09853680939253034, 0.09220942154783628, 0.08217798761124241, 0.08895607967058132, 0.10141584758871589, 0.06739018878353567, 0.0788314595561603, 0.07878443068856823, 0.04671982264724271, 0.0424491838343313, 0.04655713375240307, 0.045301885536775506, 0.03314688705421727, 0.030609465726041073, 0.02400444703505792, 0.02572265391213948, 0.022721807460364854, 0.022230125398672154, 0.034153152698779415, 0.020002304964722935, 0.027791833597112944, 0.03248033320113759, 0.021846247286985403, 0.018239968427224085, 0.011502489237594499, 0.018537745091478052, 0.01818391529778982, 0.01010182930923324, 0.01021367406742527, 0.01677587423859975, 0.024793762778903453, 0.019031021726951966, 0.006122106996340805, 0.017206230188784158, 0.008957777759421552, 0.026934080883150052],
"sparse_categorical_accuracy": [0.5691002, 0.74703664, 0.7909483, 0.8279903, 0.83943963, 0.85775864, 0.86355066, 0.87217134, 0.87796336, 0.88671875, 0.89426184, 0.89426184, 0.9042295, 0.9108297, 0.9139278, 0.9185075, 0.9284752, 0.936153, 0.94019395, 0.94275326, 0.95029634, 0.95743537, 0.9555496, 0.9610722, 0.9676724, 0.9676724, 0.970097, 0.97265625, 0.9686153, 0.9788524, 0.97494614, 0.97481143, 0.9850485, 0.9865302, 0.9853179, 0.9853179, 0.98935884, 0.991514, 0.9931304, 0.9931304, 0.992861, 0.99272627, 0.98949355, 0.99380386, 0.992861, 0.9890894, 0.9932651, 0.9954203, 0.9967672, 0.99501616, 0.99501616, 0.9967672, 0.99609375, 0.99568963, 0.9940733, 0.99393857, 0.99851835, 0.99515086, 0.9975754, 0.99488145],
"val_loss": [3919.4466145833335, 11.65205423037211, 4.360255757967631, 4.273410499095917, 1.3454897205034893, 2.2958114743232727, 0.6566694676876068, 1.7990142504374187, 1.2267223745584488, 1.6769937773545582, 0.6317088603973389, 0.6943292692303658, 0.584413061539332, 0.652853491405646, 0.9270377308130264, 0.5677525550127029, 0.5511963839332262, 0.6477192168434461, 0.9203999936580658, 0.6613056461016337, 0.7751666804154714, 0.6975759491324425, 0.7513672312100729, 0.6839244663715363, 0.7200210466980934, 0.9663039296865463, 0.8110103557507197, 0.8093332896629969, 0.8051123370726904, 0.7844762777288755, 1.004360094666481, 0.9871400992075602, 1.0284421841303508, 0.8175521641969681, 1.1002954989671707, 0.7096163878838221, 0.9926159679889679, 0.9591069966554642, 0.9033658156792322, 0.819308136900266, 0.7499794612328211, 0.8792222191890081, 0.8911004836360613, 0.9291882663965225, 0.9136366893847784, 1.1273022045691807, 0.8832092881202698, 0.9507997334003448, 0.9865448425213496, 0.9420620948076248, 0.9097297737995783, 1.0210385397076607, 1.055717463294665, 1.0009022454420726, 0.9238067418336868, 0.9220342685778936, 1.0081120158235233, 0.8685589333375295, 1.0002639045317967, 0.8850182692209879],
"val_sparse_categorical_accuracy": [0.057942707, 0.08528646, 0.37955728, 0.5722656, 0.7050781, 0.66796875, 0.80338544, 0.66276044, 0.7389323, 0.75651044, 0.8261719, 0.8020833, 0.81966144, 0.81640625, 0.7890625, 0.8489583, 0.85091144, 0.81184894, 0.77994794, 0.8385417, 0.81901044, 0.8339844, 0.8411458, 0.83528644, 0.86588544, 0.8489583, 0.8144531, 0.84244794, 0.8157552, 0.82942706, 0.796875, 0.8170573, 0.83463544, 0.8489583, 0.84244794, 0.86002606, 0.8404948, 0.8261719, 0.85026044, 0.8489583, 0.8619792, 0.8619792, 0.8717448, 0.8652344, 0.8391927, 0.85026044, 0.86002606, 0.86328125, 0.87369794, 0.8730469, 0.8645833, 0.86783856, 0.85286456, 0.8417969, 0.8483073, 0.86653644, 0.8730469, 0.86783856, 0.8723958, 0.86653644]
}
plot_lr_and_accuracy(no_schedule_history, no_schedule_conf)
```
| github_jupyter |
## Learning Objectives
- Create Pandas DataFrame from dictionary
- Explore Pandas methods and see its effect on the original DataFrame
## What is Pandas?
[Pandas](https://pandas.pydata.org/) is an open-source library that provides all kinds of easy-to-use data structures and data analysis tools in Python. It's one of the most popular libraries for data analysis, and is used by numerous companies such as [Instacart](https://tech.instacart.com/jardin-a-dataframe-based-orm-for-python-178e02e1c21) and [Twilio/SendGrid](https://sendgrid.com/blog/benefits-of-python-for-marketers/)!
## Lets create a simple dataframe in Pandas
A **Dataframe** is a 2d dynamically sized data structure. It has a table structure, where the _data_ is stored in _rows_ and _columns_.
- If we pass a Python dictionary as the `data` to the Pandas DataFrame input argument, we can create a Pandas DataFrame
```
import pandas as pd
df = pd.DataFrame(data= {'name':['john', 'mary', 'peter','jeff','bill', 'lisa'], 'age':[23, 78, 22, 19, 45, 33],
'state': ['iowa', 'oregon', 'california', 'texas', 'washington', 'oregon'], 'num_children': [2, 2, 0, 1, 2, 1],
'num_pets' : [0, 4, 0, 5, 0, 0]})
df.set_index('name', inplace=True)
df
df.columns
```
## If we are only interested in three columns
```
df[['num_pets', 'age', 'state']] #we can select more than one column by providing a list of the column names
```
What if we tried to use "name" as a column name?
# How loc[ ] works
loc [] is a very flexible way to access different rows and columns of your data frame. It is a label based selection and is NOT based on position.
loc[]'s inputs follow this format loc[row selection, column selection]
Allowed inputs are:
- A single label, e.g. 5 or 'a', (note that 5 is interpreted as a label of the index, and never as an integer position along the index).
- A list or array of labels, e.g. ['a', 'b', 'c'].
- A slice object with labels, e.g. 'a':'f'.
if loc[] is used to select more than one row but only one column or only one row but multiple columns it will give us back a [pandas series](https://towardsdatascience.com/pandas-series-a-lightweight-intro-b7963a0d62a2) object which is a one-dimensional array like object with axis labels (including time series).
## Select the row with label john
```
df.loc["john"]
```
## Select the row with label mary and column state
```
df.loc["mary", "state"]
```
### Select the rows from peter to lisa based on columns (state and age for example)
loc[] also allows slicing similar to the slicing notation used for Python arrays and strings
```
df.loc["peter":"lisa", 'state']
df.loc['peter':'lisa', ['state', 'age']]
```
## How iloc[ ] works
iloc[ ] is how you use position based selection, similar to how index based access works in Python lists. It also allows slicing notation.
### Select the first two rows
```
# select the first 2 rows
df.iloc[:2]#this says start at 0 and stop at 2, (stop not included)
```
### Select the last two rows
```
# select the last 2 rows
df.iloc[-2:]
```
### Let's do some more practice, keep an eye on which is being used loc or iloc
### Select rows up to and including the assigned one
```
# select rows up to and including the one
# with label='bill' (this retrieves 5 rows)
df.loc[:"bill"]
```
### Select the first column without specifying the name of the column
```
# first column of data frame
df.iloc[:,0] #this looks weird but it's saying select all rows (no start stop specific)
#and then select the column at position 0
```
### Select all rows from two columns by specifying the names of the columns
```
df.loc[:,['age', 'state']]
# df[['age', 'state']]
```
### Select the second row of dataframe
```
# second row of dataframe
df.iloc[1]
```
## Practice Exercise
Write code to:
1. Select the number of pets and the number of children for jeff
2. Select the rows starting at the 3rd row and up to the last row
3. Select age and number of pets for mary up to bill
```
df.loc["jeff", ["num_pets", "num_children"]]
df.iloc[2:]
df.loc["mary":"bill", ["age","num_pets"]]
```
# Filtering with Conditionals
df[ ] can do more than just take in a column name or a list of column names, it can also take in a boolean series (array) and will give you back the rows where the value is True
### Filter or conditional selection of a dataframe
```
# people whose "age" is greater than 30
df['age'] > 30
df[df['age'] > 30]
```
### Another way of the above selection
we can also refer to columns like properties of the dataframe using . notation
```
df[df.age > 30]
```
### Select dataframe such that the return dataframe has more pets than children
```
# people who have more pets than children
df["num_pets"] > df[ "num_children"]
df[df["num_pets"] > df[ "num_children"]]
```
### Who in the dataframe is older than 40 AND own pets
```
# people older than 40 who own pets
df[ (df["age"] > 40) & (df["num_pets"] > 0) ]
```
### Who in the dataframe is older than 40 OR own pets
```
df[ (df["age"] > 40) | (df["num_pets"] > 0) ]
```
### Drop age and num_children columns
**Note:** a copy of the dataframe will be returned. The original dataframe will **not** be modified by this action
drop can take optional arguments such as axis and inplace
inplace = True **will** modify the original dataframe
axis can have a value of 0 or 1, 0 means rows, 1 means drop labels from the columns
```
# df itself is not modified; a copy is returned instead
df.drop(["age","num_children"],axis=1)
```
### After droping, the original dataframe is untouched
```
df
```
### Provide various descriptive properties of the dataframe
```
df.describe()
```
### What is the average for age, num_pets and num_children
To do this, we will need the help of [Numpy](https://www.numpy.org/). Numpy is an open-source Python library used for scientific computing and provides the ability to work with high-performance arrays and matrices. Pandas builds upon functionality provided by Numpy, so the two are often used together.
### pandas apply
.apply() will apply a function along an axis, 0 = index or rows, 1 = column. The final return type is inferred from the return type of the applied function but it may not be exactly what you expecting unless you supply a specific result type to the result_type optional argument
We will often see apply given a lambda function, lambda functions are just nameless function in python. For example these two ways of specifying a function are the same
<code> def get_mean(col):
return np.mean(col)
lambda col: np.mean(col) </code>
```
# Apply an aggregate function to every column
import numpy as np
df[["age","num_pets","num_children"]].mean() #for some functions pandas already has a provided method
df[["age","num_pets","num_children"]].apply(lambda col: np.mean(col),axis=0) #what is the return type?
df[["age","num_pets","num_children"]].apply(lambda col: np.sum(col),axis=0)
df['age'].sum()
df.sum()
```
### Add numerical values of age, num_pets, num_children
```
df[["age","num_pets","num_children"]].apply(lambda row: np.sum(row),axis=1) #here the axis is the columns
#here we a are summing the values the age, num_pets, and num_children across each row
```
Another cool thing we can do is apply functions to our data and then save the results as new columns
```
df["row_sum"] = df[["age","num_pets","num_children"]].apply(lambda row: np.sum(row),axis=1)
df
```
### Select dataframe based on column name
```
df[['age']] #this returns a dataframe
df['age'] #this returns a series
```
### Apply a function to specific column WITHOUT applying the changes to original dataframe
**Note:** This does not affect the original dataframe object. This returns a mutated copy of the specified column
```
df[["age"]].apply(lambda value: value*2)
df
```
### Apply a function to specific column and apply the changes to original dataframe
```
df['age'] = df['age'].apply(lambda x: x*2)
df
```
### Sort dataframe based on a specific column in ascending order
sort_values() takes one required argument by,
by can be a str or list of strs
if axis is 0 or ‘index’ then by may contain index levels and/or column labels.
if axis is 1 or ‘columns’ then by may contain column levels and/or index labels.
```
# Sort DataFrame by column value
df.sort_values("age", ascending= True)
df #won't actually affect the original dataframe
```
### Select rows whose name begins with the letter 'j'
we can also use lambda functions to perform selections
```
# select rows whose name begins with the letter 'j'
df[df.apply(lambda row: row['name'].startswith('j'),axis=1)]
```
### Create a dataframe from an Ordered Dictionary
An OrderedDict is a dictionary subclass that remembers the order in which its contents are added, supporting the usual dict methods. If a new entry overwrites an existing entry, the original insertion position is left unchanged. Deleting an entry and reinserting it will move it to the end.
```
from collections import OrderedDict
from pandas import DataFrame
import pandas as pd
import numpy as np

# Build the demo table as an OrderedDict so the column order is preserved.
table = OrderedDict()
table["Item"] = ['Item0', 'Item0', 'Item1', 'Item1']
table['CType'] = ['Gold', 'Bronze', 'Gold', 'Silver']
table['USD'] = ['1$', '2$', '3$', '4$']
table['EU'] = ['1€', '2€', '3€', '4€']

df = DataFrame(table)
df
```
# Lab Challenge
- Find a dataset that you find interesting on kaggle and download it
- Use what we have learned so far to explore the data
- Come up with 2 questions that you can answer with the data. For example, in the pokemon data we could ask how many water and ice types are there
- Once you come up with your questions and data pair up and try to answer someone else's questions with their dataset
```
import pandas as pd
import numpy as np
# Load the TV shows dataset and drop rows with missing values.
df = pd.read_csv("Datasets/tv_shows.csv")
df.dropna(inplace=True)
df
new_dataframe = df[["Title", "Year"]]
new_dataframe
#get all the info for the shows that had a rotten tomatos rating of 96% or higher and were aired from 2019 or later
# Earlier draft of convert() below; the float check presumably guarded
# against NaN entries, made redundant by dropna() above — kept for reference.
# def convert(str_percent):
#     #print(type(str_percent))
#     if type(str_percent) != float:
#         str_percent = int(str_percent.rstrip("%"))
#     return str_percent
# df["numerical_percent"] = df["Rotten Tomatoes"].apply(convert, 1)
# df[(df["Year"] > 2019) & (df["numerical_percent"] > 96)]
def convert(percent):
    """Strip a trailing '%' from a rating string and return it as an int."""
    stripped = percent.rstrip("%")
    return int(stripped)
# Create a numeric rating column from the "Rotten Tomatoes" strings.
# FIX: the original passed a stray positional `1`, which Series.apply
# interpreted as the (deprecated) convert_dtype flag — not an axis.
df["numerical_percent"] = df["Rotten Tomatoes"].apply(convert)
# Shows aired after 2018 with a rating above 95%.
df[(df["Year"] > 2018) & (df['numerical_percent'] > 95)]
# Star Wars titles available on Disney+.
df[(df["Title"].str.match("Star Wars")) & (df["Disney+"] > 0)]
import pandas as pd
# Second dataset: Animal Crossing villagers.
df = pd.read_csv("Datasets/villagers.csv")
df
# Villagers with July birthdays; names sorted alphabetically.
df[df["Birthday"].str.contains("Jul")]["Name"].sort_values()
df[df["Birthday"].str.contains("Jul")]
df[df["Name"] == "Daisy"]
# Pig villagers, names sorted alphabetically.
df[df["Species"].str.match("Pig")]["Name"].sort_values()
```
| github_jupyter |
Resources Used
- wget.download('https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py')
- Setup https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html
# 0. Setup Paths
```
# 0. Setup Paths — every path is derived from the workspace root.
WORKSPACE_PATH = 'Tensorflow/workspace'
SCRIPTS_PATH = 'Tensorflow/scripts'
APIMODEL_PATH = 'Tensorflow/models'
ANNOTATION_PATH = f'{WORKSPACE_PATH}/annotations'
IMAGE_PATH = f'{WORKSPACE_PATH}/images'
MODEL_PATH = f'{WORKSPACE_PATH}/models'
PRETRAINED_MODEL_PATH = f'{WORKSPACE_PATH}/pre-trained-models'
CONFIG_PATH = f'{MODEL_PATH}/my_ssd_mobnet/pipeline.config'
CHECKPOINT_PATH = f'{MODEL_PATH}/my_ssd_mobnet/'
```
# 1. Create Label Map
```
# Label map: one entry per detectable class — letters A-Z (ids 1-26)
# plus common sign-language phrases (ids 27-40).
labels = [ {'name':'A', 'id':1},
    {'name':'B', 'id':2},
    {'name':'C', 'id':3},
    {'name':'D', 'id':4},
    {'name':'E', 'id':5},
    {'name':'F', 'id':6},
    {'name':'G', 'id':7},
    {'name':'H', 'id':8},
    {'name':'I', 'id':9},
    {'name':'J', 'id':10},
    {'name':'K', 'id':11},
    {'name':'L', 'id':12},
    {'name':'M', 'id':13},
    {'name':'N', 'id':14},
    {'name':'O', 'id':15},
    {'name':'P', 'id':16},
    {'name':'Q', 'id':17},
    {'name':'R', 'id':18},
    {'name':'S', 'id':19},
    {'name':'T', 'id':20},
    {'name':'U', 'id':21},
    {'name':'V', 'id':22},
    {'name':'W', 'id':23},
    {'name':'X', 'id':24},
    {'name':'Y', 'id':25},
    {'name':'Z', 'id':26},
    {'name':'hello', 'id':27},
    {'name':'thank_you', 'id':28},
    {'name':'no', 'id':29},
    {'name':'need','id':30},
    {'name':'help', 'id':31},
    {'name':'I_Appreciate', 'id':32},
    {'name':'see_you', 'id':33},
    {'name':'talk', 'id':34},
    {'name':'hurry_up', 'id':35},
    {'name':'name', 'id':36},
    {'name':'not', 'id':37},
    {'name':'when', 'id':38},
    {'name':'how', 'id':39},
    {'name':'good_morning', 'id':40},
]
labels  # display
# Write the label map in pbtxt format.
# FIX: use a forward slash so the path matches every reader below
# (generate_tfrecord.py and the pipeline config both use
# ANNOTATION_PATH + '/label_map.pbtxt'); the original '\label_map.pbtxt'
# produced a backslash-separated path that only resolved on Windows.
with open(ANNOTATION_PATH + '/label_map.pbtxt', 'w') as f:
    for label in labels:
        f.write('item { \n')
        f.write('\tname:\'{}\'\n'.format(label['name']))
        f.write('\tid:{}\n'.format(label['id']))
        f.write('}\n')
```
# 2. Create TF records
```
import os
import pandas
import tensorflow as tf
# Generate TFRecord files for the train and test image sets (Jupyter shell magics).
!python {SCRIPTS_PATH + '/generate_tfrecord.py'} -x {IMAGE_PATH + '/train'} -l {ANNOTATION_PATH + '/label_map.pbtxt'} -o {ANNOTATION_PATH + '/train.record'}
!python {SCRIPTS_PATH + '/generate_tfrecord.py'} -x {IMAGE_PATH + '/test'} -l {ANNOTATION_PATH + '/label_map.pbtxt'} -o {ANNOTATION_PATH + '/test.record'}
```
# 3. Download TF Models Pretrained Models from Tensorflow Model Zoo
```
# Clone the TF Object Detection API repo; the pretrained-model download
# commands are kept (commented) for reference.
!cd Tensorflow && git clone https://github.com/tensorflow/models
#wget.download('http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz')
#!mv ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz {PRETRAINED_MODEL_PATH}
#!cd {PRETRAINED_MODEL_PATH} && tar -zxvf ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz
```
# 4. Copy Model Config to Training Folder
```
CUSTOM_MODEL_NAME = 'my_ssd_mobnet'
# NOTE(review): Windows-style shell commands (backslash mkdir, `copy`) — not portable to Linux/Colab.
!mkdir {'Tensorflow\workspace\models\\'+CUSTOM_MODEL_NAME}
!copy {PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config'} {MODEL_PATH+'/'+CUSTOM_MODEL_NAME}
```
# 5. Update Config For Transfer Learning
```
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format

# Path to the pipeline config of the custom model created above.
CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'
config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
config
# Parse the raw pipeline.config so individual fields can be edited.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(CONFIG_PATH, "r") as f:
    proto_str = f.read()
    text_format.Merge(proto_str, pipeline_config)
### CHANGE
len(labels)
# Point the config at our classes, pretrained checkpoint and TFRecord/label-map files.
pipeline_config.model.ssd.num_classes = len(labels)
pipeline_config.train_config.batch_size = 4
pipeline_config.train_config.fine_tune_checkpoint = PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0'
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
pipeline_config.train_input_reader.label_map_path= ANNOTATION_PATH + '/label_map.pbtxt'
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/train.record']
pipeline_config.eval_input_reader[0].label_map_path = ANNOTATION_PATH + '/label_map.pbtxt'
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/test.record']
# Serialize the edited config back to disk.
config_text = text_format.MessageToString(pipeline_config)
with tf.io.gfile.GFile(CONFIG_PATH, "wb") as f:
    f.write(config_text)
```
# 6. Train the model
```
# Print the training command to copy into a terminal (training runs outside the notebook).
print("""python {}/research/object_detection/model_main_tf2.py --model_dir={}/{} --pipeline_config_path={}/{}/pipeline.config --num_train_steps=5000""".format(APIMODEL_PATH, MODEL_PATH,CUSTOM_MODEL_NAME,MODEL_PATH,CUSTOM_MODEL_NAME))
```
# 7. Load Train Model From Checkpoint
```
import os
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder

# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
detection_model = model_builder.build(model_config=configs['model'], is_training=False)

# Restore checkpoint — 'ckpt-6' is hard-coded; update it to the latest checkpoint in CHECKPOINT_PATH.
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-6')).expect_partial()
@tf.function
def detect_fn(image):
    """Run one detection pass (preprocess -> predict -> postprocess).

    Uses the module-level `detection_model` restored above; `image` is the
    raw input tensor, and the returned dict is the postprocessed detections.
    """
    image, shapes = detection_model.preprocess(image)
    prediction_dict = detection_model.predict(image, shapes)
    detections = detection_model.postprocess(prediction_dict, shapes)
    return detections
```
# 8. Detect in Real-Time
```
import cv2
from PIL import Image
import numpy as np
# (scratch code for checking training-image dimensions, kept for reference)
#from PIL import Image
# #get image
#filepath = "Tensorflow/workspace/images/train/A.9d5df640-a228-11ec-91ae-d0abd533d014.jpg"
#img = Image.open(filepath)
## get width and height
#width,height = img.size
# display width and height
#print("The height of the image is: ", height)
#print("The width of the image is: ", width)

# Map class ids -> display names for the visualizer.
category_index = label_map_util.create_category_index_from_labelmap(ANNOTATION_PATH+'/label_map.pbtxt')
# Setup capture from the default webcam.
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while True:
    ret, frame = cap.read()
    image_np = np.array(frame)
    input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
    detections = detect_fn(input_tensor)
    # Trim the batch dimension and keep only the reported number of detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections
    # detection_classes should be ints.
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    label_id_offset = 1  # label-map ids are 1-based; model classes are 0-based
    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes']+label_id_offset,
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=3,
        min_score_thresh=.9,
        agnostic_mode=False)
    cv2.imshow('Sign Language detection', cv2.resize(image_np_with_detections, (800, 600)))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()  # NOTE(review): cap is released again after the loop — redundant double release
        break
cap.release()
#cv2.destroyAllWindows()
# NOTE(review): this extra call re-runs detection on the last captured frame.
detections = detect_fn(input_tensor)
```
# 9. Save Graph
```
# Print the frozen-graph export command to run in a terminal.
print("""python {}/research/object_detection/exporter_main_v2.py --input_type=image_tensor --pipeline_config_path={}/{}/pipeline.config --trained_checkpoint_dir={} --output_directory={}export""".format(APIMODEL_PATH, MODEL_PATH, CUSTOM_MODEL_NAME,CHECKPOINT_PATH, CHECKPOINT_PATH))
```
# 10. Convert to TFJS model
```
!pip install tensorflowjs
# TFJS conversion command template — this expression only builds the string; run it in a shell.
"""tensorflowjs_converter --input_format=tf_saved_model --output_node_names='detection_boxes,detection_classes,detection_features,detection_multiclass_scores,detection_scores,num_detections,raw_detection_boxes,raw_detection_scores' --output_format=tfjs_graph_model --signature_name=serving_default {}export/saved_model {}converted""".format(CHECKPOINT_PATH, CHECKPOINT_PATH)
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Filter/filter_eq.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Filter/filter_eq.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Filter/filter_eq.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Filter/filter_eq.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()  # only needed on first run or after an auth error
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US with a hybrid basemap.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# Select California from the TIGER states table and outline it in yellow.
states = ee.FeatureCollection('TIGER/2018/States')
selected = states.filter(ee.Filter.eq("NAME", 'California'))
Map.centerObject(selected, 6)
Map.addLayer(ee.Image().paint(selected, 0, 2), {'palette': 'yellow'}, 'Selected')
```
## Display Earth Engine data layers
```
# Enable layer control, fullscreen button and lat/lng popups, then render the map.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
<a href="https://colab.research.google.com/github/intel-analytics/analytics-zoo/blob/master/docs/docs/colab-notebook/orca/quickstart/ncf_dataframe.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

---
## **Environment Preparation**
**Install Java 8**
Run the cell on the **Google Colab** to install jdk 1.8.
**Note:** if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer).
```
# Install jdk8 (required by Spark / Analytics Zoo)
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
```
**Install Analytics Zoo**
You can install the latest release version or latest pre-release version using `pip install --pre --upgrade analytics-zoo`.
```
# Install latest release version of analytics-zoo
# Installing analytics-zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo[ray]
```
## **Using Spark Dataframes for Distributed Deep Learning**
In this guide we will describe how to use Spark Dataframes to process large-scale dataset.
#### **Initialization**
import necessary libraries
```
import os
import zipfile
import argparse
import numpy as np
from bigdl.dataset import base
from sklearn.model_selection import train_test_split
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
from zoo.orca.learn.tf2 import Estimator
from zoo.orca.data import SharedValue
from pyspark.sql.functions import col
import zoo.orca.data.pandas
```
## **Init Orca Context**
```
# recommended to set it to True when running Analytics Zoo in Jupyter notebook
OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook).
# Choose "local" for single-machine runs or "yarn" for a Hadoop cluster.
cluster_mode = "local"
if cluster_mode == "local":
    init_orca_context(cluster_mode="local", cores=1) # run in local mode
elif cluster_mode == "yarn":
    init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2, driver_memory="6g") # run on Hadoop YARN cluster
```
## **Data Preprocessing with Spark Dataframes**
Orca supports Spark Dataframes as the input to the distributed training, and as the input/output of the distributed inference. Consequently, the user can easily process large-scale dataset using Apache Spark, and directly apply AI models on the distributed (and possibly in-memory) Dataframes without data conversion or serialization.
```
# Download and extract movielens 1M data.
url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
local_file = base.maybe_download('ml-1m.zip', '.', url)
if not os.path.exists('./ml-1m'):
    zip_ref = zipfile.ZipFile(local_file, 'r')
    zip_ref.extractall('.')
    zip_ref.close()

# Read in the dataset, and do a little preprocessing
rating_files="./ml-1m/ratings.dat"
new_rating_files="./ml-1m/ratings_new.dat"
if not os.path.exists(new_rating_files):
    fin = open(rating_files, "rt")
    fout = open(new_rating_files, "wt")
    for line in fin:
        # replace :: to : for spark 2.4 support
        fout.write(line.replace('::', ':'))
    fin.close()
    fout.close()

# read csv into a Spark dataframe
spark = OrcaContext.get_spark_session()
df = spark.read.csv(new_rating_files, sep=':', header=True, inferSchema=True).toDF(
    "user", "item", "label", "timestamp")
# Collect id ranges (used later to size the embedding tables).
user_set = df.select('user').collect()
item_set = df.select('item').collect()
min_user_id = min(user_set)[0]
max_user_id = max(user_set)[0]
min_item_id = min(item_set)[0]
max_item_id = max(item_set)[0]
print(min_user_id, max_user_id, min_item_id, max_item_id)

# update label starting from 0
df = df.withColumn('label', df.label-1)
# split to train/test dataset (seed=100 for reproducibility)
train_data, test_data = df.randomSplit([0.8, 0.2], 100)
```
### **Define NCF Model**
```
def model_creator(config):
    """Build and compile the NCF (GMF + MLP) Keras model.

    Sizes the embedding tables from the notebook globals
    max_user_id / max_item_id computed above.
    """
    import tensorflow as tf
    from tensorflow import keras
    embedding_size=16
    user = keras.layers.Input(dtype=tf.int32, shape=(None,))
    item = keras.layers.Input(dtype=tf.int32, shape=(None,))
    # NOTE(review): `label` is never used and is not an input of the final model.
    label = keras.layers.Input(dtype=tf.int32, shape=(None,))

    # Generalized Matrix Factorization branch: elementwise product of embeddings.
    with tf.name_scope("GMF"):
        user_embed_GMF = keras.layers.Embedding(max_user_id + 1, embedding_size)(user)
        item_embed_GMF = keras.layers.Embedding(max_item_id + 1, embedding_size)(item)
        GMF = keras.layers.Multiply()([user_embed_GMF, item_embed_GMF])

    # MLP branch: concatenated embeddings through three dense+dropout layers.
    with tf.name_scope("MLP"):
        user_embed_MLP = keras.layers.Embedding(max_user_id + 1, embedding_size)(user)
        item_embed_MLP = keras.layers.Embedding(max_item_id + 1, embedding_size)(item)
        interaction = tf.concat([user_embed_MLP, item_embed_MLP], axis=-1)
        layer1_MLP = keras.layers.Dense(units=embedding_size * 2, activation='relu')(interaction)
        layer1_MLP = keras.layers.Dropout(rate=0.2)(layer1_MLP)
        layer2_MLP = keras.layers.Dense(units=embedding_size, activation='relu')(layer1_MLP)
        layer2_MLP = keras.layers.Dropout(rate=0.2)(layer2_MLP)
        layer3_MLP = keras.layers.Dense(units=embedding_size // 2, activation='relu')(layer2_MLP)
        layer3_MLP = keras.layers.Dropout(rate=0.2)(layer3_MLP)

    # Concate the two parts together; 5 output classes = ratings 0-4.
    with tf.name_scope("concatenation"):
        concatenation = tf.concat([GMF, layer3_MLP], axis=-1)
        outputs = keras.layers.Dense(units=5, activation='softmax')(concatenation)

    model = keras.Model(inputs=[user, item], outputs=outputs)
    model.compile(optimizer= "adam",
                  loss= "sparse_categorical_crossentropy",
                  metrics=['accuracy'])
    return model
```
### **Fit with Orca Estimator**
```
batch_size=1280
epochs=2
model_dir='./'

# create an Estimator wrapping the Keras model for distributed training
est = Estimator.from_keras(model_creator=model_creator, workers_per_node=1)
# Train directly on the Spark dataframe.
stats = est.fit(train_data,
                epochs=epochs,
                batch_size=batch_size,
                feature_cols=['user', 'item'],
                label_cols=['label'],
                steps_per_epoch=800000 // batch_size,
                validation_data=test_data,
                validation_steps = 200000 // batch_size)

# Save the trained weights next to the notebook.
checkpoint_path = os.path.join(model_dir, "NCF.ckpt")
est.save(checkpoint_path)

# evaluate with Estimator
stats = est.evaluate(test_data,
                     feature_cols=['user', 'item'],
                     label_cols=['label'],
                     num_steps=100000 // batch_size)
est.shutdown()
print(stats)
stop_orca_context()
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from data_builder import *
from utils import *
%matplotlib inline
from data_builder import *
from utils import *
import numpy as np
import matplotlib.pyplot as plt
import argparse
import pickle
from model_builder import get_models
def duffing(num_samples, T_max, dt, noise_std=0, seed=1, type=1):
    """Generate trajectories of a driven Duffing oscillator.

    Returns a dict with stacked states 'x', time-derivatives 'dx',
    per-step 'energy' values and the time grid 'tvalues'. `type` selects
    one of three parameter sets; type 3 uses the (gamma=0.39, delta=0.1,
    omega=1.4) configuration with a fixed [0, 0] start and an 18000 s grid.

    NOTE(review): `sin`, `cos` and the integrator `rk` are assumed to come
    from the star imports (data_builder/utils) — confirm.
    """
    def hamiltonian_fn(coords, t):
        # Energy of one (q, p) state at time t, using the type-1 parameters.
        q, p = np.split(coords, 2)
        alpha = 1
        beta = 1
        omega = 1.2
        delta = 0
        gamma = .2
        H = alpha * (q ** 2) / 2 + (p ** 2) / 2 + beta * (q ** 4) / 4 - q * gamma * sin(
            omega * t)  # pendulum hamiltonian
        return H

    def dynamics_fn(t, coords):
        # Vector field [dq/dt, dp/dt]; drive/damping parameters depend on `type`.
        # dcoords = autograd.grad(hamiltonian_fn)(coords, t)
        # dqdt, dpdt = np.split(dcoords, 2)
        # S = np.concatenate([dpdt, -dqdt], axis=-1)
        q, p = np.split(coords, 2)
        alpha = -1
        beta = 1
        omega = 1.2
        delta = 0.3
        gamma = .2
        if type == 1:
            S = np.concatenate([p,-alpha*q-beta*q**3-delta*p+gamma*sin(omega*t)],axis=-1)
        if type == 2:
            gamma = 0.5
            S = np.concatenate([p, -alpha * q - beta * q ** 3 - delta * p + gamma * sin(omega * t)], axis=-1)
        if type == 3:
            # alpha =1
            # beta = 5
            omega = 1.4
            gamma, delta = 0.39, 0.1
            S = np.concatenate([p, q - q ** 3 - delta * p + gamma * cos(omega * t)], axis=-1)
        return S

    def get_trajectory(t_span=[0, T_max], timescale=dt):
        # Integrate one trajectory from a random initial condition
        # (type 3 overrides this with a fixed [0, 0] start and long grid).
        t_eval = np.arange(t_span[0], t_span[1], timescale)
        y0 = np.random.rand(2) * 2 - 1
        radius = np.sqrt(np.random.uniform(0.5, 1.5))  # np.random.rand() * 0.9 + 0.1 # sample a range of radii
        y0 = y0 / np.sqrt((y0 ** 2).sum()) * (radius)
        if type == 3:
            y0 = [0., 0.]
            omega = 1.4
            dt_per_period = 100
            period = 2 * np.pi / omega
            dt = 2 * np.pi / omega / dt_per_period
            t_span = [0, 18000]
            t_eval = np.arange(0, 18000, dt)
        spring_ivp = rk(fun=dynamics_fn, t_span=t_span, y0=y0, t_eval=t_eval, rtol=1e-10)
        q, p = spring_ivp['y'][0], spring_ivp['y'][1]
        # Recompute exact derivatives along the solution for supervision targets.
        dydt = [dynamics_fn(t_eval[i], y) for i, y in enumerate(spring_ivp['y'].T)]
        dydt = np.stack(dydt).T
        dqdt, dpdt = np.split(dydt, 2)
        # add noise
        q += np.random.randn(*q.shape) * noise_std
        p += np.random.randn(*p.shape) * noise_std
        return q, p, dqdt, dpdt, t_eval

    data = {'meta': locals()}
    # randomly sample inputs
    np.random.seed(seed)
    xs, dxs = [], []
    ssr = 1  # int(srate / dt)
    energies = []
    tvalues = []
    for s in range(num_samples):
        x, y, dx, dy, t = get_trajectory()
        x = x[::ssr]
        y = y[::ssr]
        dx = dx[::ssr]
        dy = dy[::ssr]
        xs.append(np.stack([x, y]).T)
        energies.append([hamiltonian_fn(xs[-1][i], t[i]) for i in range(len(xs[-1]))])
        dxs.append(np.stack([dx, dy]).T)
        tvalues.append(t)
    data['x'] = np.concatenate(xs)
    data['dx'] = np.concatenate(dxs).squeeze()
    data['energy'] = np.concatenate(energies)
    data['tvalues'] = np.concatenate(tvalues)
    return data
# One driving period of the type-3 force (omega = 1.4), 100 samples per period.
omega = 1.4
dt_per_period = 100
period = 2 * np.pi / omega
dt = 2 * np.pi / omega / dt_per_period
tevals = torch.arange(0,period,dt)
tevals.to('cpu')  # NOTE(review): .to() is not in-place; the returned tensor is discarded here
tevals.requires_grad = True
tevals = tevals.float()
# NOTE(review): model_type is loaded in a later cell — this cell depends on run order.
f = model_type.get_F(tevals.reshape(-1,1))
# Compare the learned forcing to the true 0.39*cos(1.4 t).
plt.plot(f.detach().numpy(),label='pred')
plt.plot(0.39*np.cos(1.4*tevals.detach().numpy()),label='gt')
plt.legend()
# Long rollout of the type-3 Duffing system used as the test trajectory.
test_data = duffing(1, 18000, 0.1, noise_std=0, seed=5, type=3)
pstep = 100
plt.scatter(test_data['x'][::pstep,0],test_data['x'][::pstep,1])
test_data['x'][0]
def integrate_model(model, t_span, y0,t_eval, **kwargs):
    """Roll out a learned time-dependent vector field with scipy's solve_ivp.

    The drive time is wrapped modulo one forcing period (omega = 1.4)
    before being fed to the model's time_deriv.
    """
    drive_omega = 1.4
    drive_period = 2 * np.pi / drive_omega

    def rhs(t, flat_state):
        state = torch.tensor(flat_state, requires_grad=True, dtype=torch.float32).view(1, 2)
        phase = torch.tensor(np.mod(t, drive_period), requires_grad=True, dtype=torch.float32).view(1, 1)
        return model.time_deriv(state, phase).data.numpy().reshape(-1)

    return solve_ivp(fun=rhs, t_span=t_span, y0=y0, t_eval=t_eval, **kwargs)
def test_model(model_name,model):
    """Roll out `model` over the full test trajectory and append a
    (prediction, ground-truth) pair to the module-level `main_pred` dict."""
    # Each epoch has a training and validation phase
    q = test_data['x']
    t = test_data['tvalues']
    # q.requires_grad = True
    # tevals.requires_grad = True
    qinit = q[0]  # initial condition for the rollout
    omega = 1.4
    dt_per_period = 100
    period = 2 * np.pi / omega
    dt = 2 * np.pi / omega / dt_per_period
    # Integrate over the full 18000 s span on the same grid as the test data.
    preds = integrate_model(model,[0,18000],qinit.ravel(),t_eval=np.arange(0, 18000, dt)).y
    main_pred[model_name].append((preds.T,q))
# model_ft = HNN(2, 200, 1, 0.01)
main_pred={'baseline':[],'TDHNN4':[]}
model_nms = list(main_pred.keys())
for model_name in model_nms:
model_type = torch.load(f'duffing_3/{model_name}/model',map_location='cpu')
model_type.eval()
test_model(model_name,model_type)
# Main phase-portrait figure: baseline NN vs pHNN vs ground truth.
pstep =100
sns.set_context("paper",font_scale=5.4, rc={'figure.figsize':(5,5),"font.size":40,"axes.titlesize":30,"axes.labelsize":20,'lines.linewidth':3})
sns.set_palette("Accent")
fig,ax = plt.subplots(1,3,figsize=(30,10))
# plt.figure(figsize=(15,10))
ax[0].scatter(main_pred['baseline'][0][0][::pstep,0],main_pred['baseline'][0][0][::pstep,1],label='predicted')
ax[0].set_title('Baseline NN')
ax[1].scatter(main_pred['TDHNN4'][0][0][::pstep,0],main_pred['TDHNN4'][0][0][::pstep,1],label='predicted')
ax[1].set_title('pHNN')
ax[2].scatter(test_data['x'][::pstep,0],test_data['x'][::pstep,1],c='black',label='ground truth')
ax[2].set_title('Ground Truth')
# Same axis limits on all three panels for a fair visual comparison.
ax[2].set_xlim([-2,2])
ax[2].set_ylim([-1.2,1.2])
ax[1].set_xlim([-2,2])
ax[1].set_ylim([-1.2,1.2])
ax[0].set_xlim([-2,2])
ax[0].set_ylim([-1.2,1.2])
ax[0].axis('off')
ax[1].axis('off')
ax[2].axis('off')
# plt.tight_layout()
plt.savefig('main_fig.pdf',dpi=2400,bbox_inches='tight')
plt.figure()
plt.xlim([-2,2])
plt.ylim([-1.2,1.2])
# Occupancy histograms over the same phase-space window for ground truth and
# both models. FIX: `density=True` replaces the `normed` keyword, which was
# removed from matplotlib's hist/hist2d in the 3.x series.
gt = plt.hist2d(test_data['x'][::pstep,0],test_data['x'][::pstep,1],bins=1000,range=[[-2,2],[-1.2,1.2]],density=True)[0]
phnn = plt.hist2d(main_pred['TDHNN4'][0][0][::pstep,0],main_pred['TDHNN4'][0][0][::pstep,1],bins=1000,density=True,range=[[-2,2],[-1.2,1.2]])[0]
bnn = plt.hist2d(main_pred['baseline'][0][0][::pstep,0],main_pred['baseline'][0][0][::pstep,1],bins=1000,density=True,range=[[-2,2],[-1.2,1.2]])[0]
def intersect_map(a,b):
    """Return a float array that is 1 where both `a` and `b` equal 1, else 0.

    Vectorized with numpy instead of the original O(rows*cols) Python
    double loop; output dtype (float64) matches the original np.zeros map.
    """
    return np.where((a == 1) & (b == 1), 1.0, 0.0)
# Compare model occupancy maps against the ground-truth map.
gtphnn = intersect_map(gt,phnn)
gtbnn = intersect_map(gt,bnn)
# Overlap counts for pHNN vs baseline.
# FIX: the original line `gtphnn.sum()/,gtbnn.sum()` was a syntax error.
gtphnn.sum(), gtbnn.sum()
len(gt[gt!=0])
# Binarize the occupancy maps: any visited bin -> 1.
gt[gt!=0]=1
phnn[phnn!=0]=1
bnn[bnn!=0]=1
# FIX: '&' is not defined for float arrays (TypeError); use logical_and.
np.logical_and(gt, phnn)
(gt==phnn).sum(),(gt==bnn).sum()
((gt-phnn)**2).mean(),((gt-bnn)**2).mean()
((gt-phnn)**2).mean(),((gt-bnn)**2).mean()
qs = np.linspace(-2,2,1000)
ps = np.linspace(-1.2,1.2,1000)
# FIX: np.gridspace does not exist; meshgrid builds the 2-D coordinate grids.
QS,PS = np.meshgrid(qs, ps)
# Mean absolute and worst-case state errors for both models.
(np.abs(main_pred['baseline'][0][0][::pstep] - test_data['x'][::pstep])).mean()
(np.abs(main_pred['TDHNN4'][0][0][::pstep] - test_data['x'][::pstep])).mean()
np.linalg.norm(main_pred['baseline'][0][0][::pstep] - test_data['x'][::pstep],ord=np.inf)
np.linalg.norm(main_pred['TDHNN4'][0][0][::pstep] - test_data['x'][::pstep],ord=np.inf)
fig,ax = plt.subplots(1,2,figsize=(25,10))
# plt.figure(figsize=(15,10))
# Overlay the first 500 q-values of ground truth and pHNN prediction.
ax[0].plot(test_data['x'][:500,0],c='black',label='ground truth')
ax[0].set_title('Ground Truth')
ax[0].plot(main_pred['TDHNN4'][0][0][:500,0],c='blue',label='predicted')
ax[0].set_title('Predicted')
# ax[1].plot(main_pred['TDHNN4'][0][0][::pstep,0],c='blue',label='predicted')
# ax[1].set_title('Predicted')
fig,ax = plt.subplots(1,2,figsize=(25,10))
# plt.figure(figsize=(15,10))
# Per-step state MSE between ground truth and pHNN rollout.
ax[0].plot(np.mean(np.square(test_data['x']-main_pred['TDHNN4'][0][0]),1),c='black',label='ground truth')
ax[0].set_title('Ground Truth')
# ax[1].plot(main_pred['TDHNN4'][0][0][::pstep,0],c='blue',label='predicted')
# ax[1].set_title('Predicted')
# The animation
fig, ax = plt.subplots(nrows=1,ncols=2)
x = test_data['x'][:,0]
xdot = test_data['x'][:,1]
# Poincaré section plot — ground truth (left panel)
ax4 = ax[0]
ax4.set_xlabel(r'$x / \mathrm{m}$')
ax4.set_ylabel(r'$\dot{x} / \mathrm{m\,s^{-1}}$')
ax4.scatter(x[::pstep], xdot[::pstep], s=2, lw=0, c='r')
scat1 = ax4.scatter([0], [0], lw=0, c='m')
x = main_pred['TDHNN4'][0][0][:,0]
xdot = main_pred['TDHNN4'][0][0][:,1]
# Poincaré section plot — pHNN prediction (right panel)
ax4 = ax[1]
ax4.set_xlabel(r'$x / \mathrm{m}$')
ax4.set_ylabel(r'$\dot{x} / \mathrm{m\,s^{-1}}$')
ax4.scatter(x[::pstep], xdot[::pstep], s=2, lw=0, c='b')
scat1 = ax4.scatter([0], [0], lw=0, c='m')
plt.tight_layout()
# (disabled Matplotlib animation/video export, kept for reference)
# def animate(i):
#     """Update the image for iteration i of the Matplotlib animation."""
#     ln1.set_data(x[i], V(x[i]))
#     ln2.set_data(t[:i+1], x[:i+1])
#     ax2.set_xlim(t_trans, t[i])
#     ln3.set_data(x[:i+1], xdot[:i+1])
#     if not i % pstep:
#         scat1.set_offsets(X[i])
#     return
# anim = animation.FuncAnimation(fig, animate, frames=len(x), interval=1)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# anim.save('duffing.mp4', writer=writer)
# Training data (red crosses) overlaid on the test and predicted rollouts.
train_data = get_dataset('duffing', 20, 3, 0.1, 0, seed=0,type=3)
plt.figure(figsize=(15,10))
plt.scatter(test_data['x'][::pstep,0],test_data['x'][::pstep,1],c='black')
plt.scatter(main_pred['TDHNN4'][0][0][::pstep,0],main_pred['TDHNN4'][0][0][::pstep,1],c='blue')
plt.scatter(train_data['x'][:,0],train_data['x'][:,1],c='red',marker='x',s=0.8)
plt.scatter(main_pred['TDHNN4'][0][1][::pstep,0],main_pred['TDHNN4'][0][1][::pstep,1])
plt.scatter(x[::pstep],xdot[::pstep])
# NOTE(review): deriv/gamma/delta/omega are defined in later cells — run-order dependent.
deriv([0,0],0,gamma,delta,omega)
def dynamics_fn(coords,t):
    """Type-3 Duffing vector field: returns [dq/dt, dp/dt] for state (q, p)."""
    # dcoords = autograd.grad(hamiltonian_fn)(coords, t)
    # dqdt, dpdt = np.split(dcoords, 2)
    # S = np.concatenate([dpdt, -dqdt], axis=-1)
    position, momentum = np.split(coords, 2)
    drive_freq = 1.4
    drive_amp, damping = 0.39, 0.1
    dq = momentum
    dp = position - position ** 3 - damping * momentum + drive_amp * np.cos(drive_freq * t)
    return np.concatenate([dq, dp], axis=-1)
dynamics_fn(np.array([0,0]),0)
import numpy as np
from scipy.integrate import odeint, quad
from scipy.optimize import brentq
import matplotlib.pyplot as plt
from matplotlib import animation, rc
import seaborn as sbs
# Matplotlib/LaTeX styling for the reference Duffing solution below.
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 20})
rc('text', usetex=True)
rc('animation', html='html5')
# The potential and its first derivative, as callables.
V = lambda x: 0.5 * x**2 * (0.5 * x**2 - 1)
dVdx = lambda x: x**3 - x
# The potential energy function on a grid of x-points.
xgrid = np.linspace(-1.5, 1.5, 100)
Vgrid = V(xgrid)
plt.plot(xgrid, Vgrid)
plt.xlabel('$x$')
plt.ylabel('$V(x)$')
def deriv(X, t, gamma, delta, omega):
    """Return the derivatives dx/dt and d2x/dt2 for the driven, damped
    Duffing oscillator (uses the module-level potential gradient dVdx)."""
    position, velocity = X
    acceleration = -dVdx(position) - delta * velocity + gamma * np.cos(omega * t)
    return velocity, acceleration
def solve_duffing(tmax, dt_per_period, t_trans, x0, v0, gamma, delta, omega):
    """Solve the Duffing equation for parameters gamma, delta, omega.

    Find the numerical solution to the Duffing equation using a suitable
    time grid: tmax is the maximum time (s) to integrate to; t_trans is
    the initial time period of transient behaviour until the solution
    settles down (if it does) to some kind of periodic motion (these data
    points are dropped) and dt_per_period is the number of time samples
    (of duration dt) to include per period of the driving motion (frequency
    omega).

    Returns the time grid, t (after t_trans), position, x, and velocity,
    xdot, dt, and step, the number of array points per period of the driving
    motion.
    """
    # Time point spacings and the time grid
    period = 2*np.pi/omega
    dt = 2*np.pi/omega / dt_per_period
    step = int(period / dt)  # samples per driving period (== dt_per_period)
    t = np.arange(0, tmax, dt)
    # Initial conditions: x, xdot
    X0 = [x0, v0]
    # odeint integrates the module-level deriv() defined above.
    X = odeint(deriv, X0, t, args=(gamma, delta, omega))
    idx = int(t_trans / dt)  # drop the transient portion
    return t[idx:], X[idx:], dt, step
# Set up the motion for a oscillator with initial position
# x0 and initially at rest.
x0, v0 = 0, 0
tmax, t_trans = 18000, 300
omega = 1.4
gamma, delta = 0.39, 0.1
dt_per_period = 100
# Solve the equation of motion.
t, X, dt, pstep = solve_duffing(tmax, dt_per_period, t_trans, x0, v0, gamma, delta, omega)
x, xdot = X.T
# Poincaré section of the reference solution.
plt.scatter(x[::pstep],xdot[::pstep])
# NOTE(review): `res` is not defined in the visible cells — stale scratch code?
plt.scatter(res['x'][::,0],res['x'][::,1])
res['x'].shape
yhat = main_pred['TDHNN4'][0][0][:-1]#main_pred_coll[dex]
plt.plot(yhat[::20,0],yhat[::20,1])
# model_ft = HNN(2, 200, 1, 0.01)
model_dct = get_models(dt, type=None, hidden_dim=200)
model_nms = ['TDHNN4']
for model_name in model_nms:
    # NOTE(review): `dataset_name` is not defined in the visible cells — run-order dependent.
    model_type = torch.load(f'{dataset_name}/{model_name}/model',map_location='cpu')
    model_type.eval()
    test_model(model_name,model_type)
def hamiltonian_fn(coords):
    """Energy-like quantity for a batch of (q, p) states.

    NOTE(review): relies on notebook globals `T_max_t`, `dt` and a bare
    `sin` (presumably np.sin via a star import) — confirm they exist when run.
    The time grid must match len(coords) for the elementwise product.
    """
    alpha = -1
    beta = 1
    omega = 1.2
    delta = 0.3
    gamma = 0.2
    q, p = coords[:,0],coords[:,1]
    t = np.arange(0,T_max_t,dt)[1:]
    H = alpha*q ** 2 / 2 + p ** 2/2 +beta*q**4/4 - q * gamma*sin(omega * t) # pendulum hamiltonian
    return H
# Aggregate state / energy errors per model into a dataframe for plotting.
avg_perf = {}
import pandas as pd
new_df = pd.DataFrame(columns=['Method','State Error','Energy Error'])
for model in model_nms:
    vals = main_pred[model]
    for i in range(len(vals)):
        pred = vals[i][0][:-1]  # drop last step so pred/truth lengths match
        true = vals[i][1]
        state_error = np.mean(np.square(pred-true))
        energy_error = np.mean(np.square(hamiltonian_fn(pred)-hamiltonian_fn(true)))
        new_df.loc[len(new_df)] = [model,state_error,energy_error]
# Log-scale bar plots of both error metrics.
fig,ax = plt.subplots(1,2,figsize=(20,5))
sns.set_context("poster")
g = sns.barplot(x='Method',y='State Error',data=new_df,ax=ax[0])
g.set_yscale('log')
g = sns.barplot(x='Method',y='Energy Error',data=new_df,ax=ax[1])
g.set_yscale('log')
# Figure: per-model rollouts -- phase portrait, energy trace, and MSE curves.
sns.axes_style(style='ticks')
sns.set_context("paper",font_scale=1.4, rc={'figure.figsize':(5,5),"font.size":20,"axes.titlesize":20,"axes.labelsize":20,'lines.linewidth':3})
# model_nms = ['TDHNN','TDHNN3']
lab_list = model_nms
fig,axs = plt.subplots(1,4,figsize=(5*4,5))
ax = axs.ravel()
#ax = axs.ravel()
# lab_list.remove('TDHNN')
# lab_list.remove('TDHNN')
for dex,name in enumerate(lab_list):
    # Predicted rollout (last step dropped) and matching ground truth.
    yhat = main_pred[name][0][0][:-1]#main_pred_coll[dex]
    true_batch = main_pred[name][0][1]
    K_pred = hamiltonian_fn(yhat)
    K_true = hamiltonian_fn(true_batch)
    ham_pred = K_pred
    ham_true = K_true
    ax[0].set_title('State Rollout')
    sns.lineplot(x=yhat[::,0],y=yhat[::,1],label=lab_list[dex],sort=False,ax=ax[0],legend=False)
    #ax[0].scatter(input_batch[:,0],input_batch[:,1])
    ax[0].set_xlabel('q')
    ax[0].set_ylabel(r'$\dot{q}$')
    ax[1].set_title('Energy Rollout')
    sns.lineplot(x=range(len(ham_pred)),y=ham_pred,label=lab_list[dex],ax=ax[1],legend=False)
    ax[1].set_xlabel('steps')
    ax[1].set_ylabel('Energy')
    ax[2].set_title('State MSE')
    l1 =sns.lineplot(x=range(len(yhat)),y=((yhat-true_batch)**2).mean(1),label=lab_list[dex],ax=ax[2],legend=False)
    ax[2].set_xlabel('steps')
    ax[2].set_ylabel('MSE')
    ax[3].set_title('Energy MSE')
    l2 =sns.lineplot(x=range(len(yhat)),y=(ham_pred-ham_true)**2,label=lab_list[dex],ax=ax[3],legend=False)
    ax[3].set_xlabel('steps')
    ax[3].set_ylabel('MSE')
# Ground-truth overlays (zero reference lines for the two MSE panels).
sns.lineplot(x=true_batch[:,0],y=true_batch[:,1],markers='x',label='Ground Truth',ax=ax[0],sort=False,legend=False)
sns.lineplot(x=range(len(ham_pred)),y=ham_true,label='Ground Truth',ax=ax[1],legend=False)
sns.lineplot(x=range(len(yhat)),y=np.zeros(len(yhat)),label='Ground Truth',ax=ax[2],legend=False)
sns.lineplot(x=range(len(yhat)),y=np.zeros(len(yhat)),label='Ground Truth',ax=ax[3],legend=False)
ax[2].ticklabel_format(axis="y", style="sci", scilimits=(0,0))
ax[3].ticklabel_format(axis="y", style="sci", scilimits=(0,0))
# ax[1].set_yscale('log')
# ax[2].set_yscale('log')
# One shared legend beneath all four panels.
handles, labels = ax[3].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center',bbox_to_anchor=(0.5, -0.015),fancybox=True,ncol=6)
#plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('mass_spring_long.pdf',dpi=2400,bbox_inches='tight')
# Figure: q(t) and qdot(t) traces for each model vs. ground truth.
sns.axes_style(style='ticks')
sns.set_context("paper",font_scale=1.4, rc={'figure.figsize':(5,5),"font.size":20,"axes.titlesize":20,"axes.labelsize":20,'lines.linewidth':3})
lab_list = model_nms
fig,axs = plt.subplots(1,2,figsize=(5*4,5))
ax = axs.ravel()
#ax = axs.ravel()
# lab_list.remove('HNN')
# lab_list.remove('TDHNN')
# lab_list.remove('TDHNN1')
for dex,name in enumerate(lab_list):
    # Predicted rollout (last step dropped) and matching ground truth.
    yhat = main_pred[name][0][0][:-1]#main_pred_coll[dex]
    true_batch = main_pred[name][0][1]
    ax[0].set_title('State Rollout')
    sns.lineplot(x=range(len(yhat)),y=yhat[::,0],label=lab_list[dex],sort=False,ax=ax[0],legend=False)
    #ax[0].scatter(input_batch[:,0],input_batch[:,1])
    ax[0].set_xlabel('t')
    ax[0].set_ylabel(r'$q$')
    ax[1].set_title('State Rollout')
    sns.lineplot(x=range(len(yhat)),y=yhat[::,1],label=lab_list[dex],sort=False,ax=ax[1],legend=False)
    #ax[0].scatter(input_batch[:,0],input_batch[:,1])
    ax[1].set_xlabel('t')
    ax[1].set_ylabel(r'$\dot{q}$')
# Ground-truth overlays for both coordinates.
sns.lineplot(x=range(len(yhat)),y=true_batch[:,0],markers='x',label='Ground Truth',ax=ax[0],sort=False,legend=False)
sns.lineplot(x=range(len(yhat)),y=true_batch[:,1],markers='x',label='Ground Truth',ax=ax[1],sort=False,legend=False)
plt.legend()
# Inspect the loaded model: trainable parameters, the learned forcing
# term F(t), the learned damping D, and the learned Hamiltonian surface.
model_type
for name, param in model_type.named_parameters():
    if param.requires_grad:
        print (name, param.data)
torch.sigmoid(torch.tensor(1.52))
torch.sigmoid(torch.tensor(0.056))
# Compare the learned forcing F(t) against the analytic 8*cos(0.5*t).
tevals = torch.arange(0,24.01,0.01)
# fix: Tensor.to() is not in-place -- the result must be reassigned,
# otherwise the tensor silently stays on its original device.
tevals = tevals.to(device)
tevals.requires_grad = True
tevals = tevals.float()
f = model_type.get_F(tevals.reshape(-1,1))
plt.plot(f.detach().numpy(),label='pred')
plt.plot(8*np.cos(0.5*tevals.detach().numpy()),label='gt')
plt.legend()
# Compare the learned damping f*qdot against the analytic -0.3*qdot on
# one validation batch (break after the first batch).
for batch_i, (q, q_next, energy_, qdx, tevals) in enumerate(data_dict['valid']):
    q=q.to(device)
    q=q.float()
    q.requires_grad=True
    # fix: same reassignment bug as above -- .to() result was discarded.
    tevals = tevals.to(device)
    tevals.requires_grad = True
    tevals = tevals.float()
    f = model_type.get_D()
    print(f,q[:,1],(f*q[:,1]))
    plt.plot((f[0]*q[:,1]).detach().numpy(),label='pred')
    plt.plot(-0.3*q[:,1].detach().numpy(),label='gt')
    break
# Contour plot of the learned Hamiltonian over a (q, qdot) grid.
q = torch.arange(-5,5,0.1)
qd = torch.arange(-5,5,0.1)
q= q.float()
qd = qd.float()
q.requires_grad = True
qd.requires_grad = True
qvec = torch.cat([q.reshape(-1,1),qd.reshape(-1,1)],1)
f = model_type.get_H(qvec)
qm,qdm = torch.meshgrid(q,qd)
f = model_type.get_H(torch.cat([qm.reshape(-1,1),qdm.reshape(-1,1)],1))
plt.contourf(qm.detach().numpy(),qdm.detach().numpy(),f.reshape(-1,qm.shape[0]).detach().numpy())
model_type.f3.weight
```
| github_jupyter |
### Data Exploration of a publicly available dataset.
<img align="right" src="http://www.sharielf.com/gifs/zz032411pony.jpg" width="220px">
Data processing, cleaning and normalization is often 95% of the battle. Never underestimate this part of the process, if you're not careful about it your derrière will be sore later. Another good reason to spend a bit of time on understanding your data is that you may realize that the data isn't going to be useful for your task at hand. Quick pruning of fruitless branches is good.
#### Data as an analogy: Data is almost always a big pile of shit, the only real question is, "Is there a Pony inside?" and that's what data exploration and understanding is about. ####
For this exploration we're going to pull some data from the Malware Domain List website [http://www.malwaredomainlist.com](http://www.malwaredomainlist.com). We'd like to thank them for providing a great resource and making their data available to the public. In general data is messy, so even though we're going to be nit-picking quite a bit, we recognize that many datasets will have similar issues, which is why we feel like this is a good 'real world' example of data.
* Full database: [ http://www.malwaredomainlist.com/mdlcsv.php](http://www.malwaredomainlist.com/mdlcsv.php)
```
# This exercise is mostly for us to understand what kind of data we have and then
# run some simple stats on the fields/values in the data. Pandas will be great for that.
# NOTE(review): this cell targets Python 2 (print statements, urlparse module)
# and an old pandas API (error_bad_lines); it needs porting to run on Python 3.
import pandas as pd
pd.__version__
# Set default figure sizes
pylab.rcParams['figure.figsize'] = (14.0, 5.0)
# This data url can be a web location http://foo.bar.com/mydata.csv or it can be
# a path to your disk where the data resides /full/path/to/data/mydata.csv
# Note: Be a good web citizen, download the data once and then specify a path to your local file :)
# For instance: > wget http://www.malwaredomainlist.com/mdlcsv.php -O mdl_data.csv
# data_url = 'http://www.malwaredomainlist.com/mdlcsv.php'
data_url = 'data/mdl_data.csv'
# Note: when the data was pulled it didn't have column names, so poking around
# on the website we found the column headers referenced so we're explicitly
# specifying them to the CSV reader:
# date,domain,ip,reverse,description,registrant,asn,inactive,country
dataframe = pd.read_csv(data_url, names=['date','domain','ip','reverse','description',
                        'registrant','asn','inactive','country'], header=None, error_bad_lines=False, low_memory=False)
dataframe.head(5)
dataframe.tail(5)
# We can see there's a blank row at the end that got filled with NaNs.
# Thankfully Pandas is great about handling missing data.
print dataframe.shape
dataframe = dataframe.dropna()
dataframe.shape
# For this use case we're going to remove any rows that have a '-' in the data
# by replacing '-' with NaN and then running dropna() again
dataframe = dataframe.replace('-', np.nan)
dataframe = dataframe.dropna()
dataframe.shape
# Drilling down into one of the columns
dataframe['description']
# Pandas has a describe method:
# for numerical data it gives a nice set of summary statistics,
# for categorical data it simply gives count, unique values
# and the most common value.
dataframe['description'].describe()
# We can get a count of all the unique values by running value_counts()
dataframe['description'].value_counts()
# We noticed that the description values just differ by whitespace or capitalization
dataframe['description'] = dataframe['description'].map(lambda x: x.strip().lower())
dataframe['description']
# First thing we noticed was that many of the 'submissions' had the exact same
# date, which we're guessing means some batch jobs just threw a bunch of
# domains in and stamped them all with the same date.
# We also noticed that many values just differ by capitalization (this is common).
# NOTE(review): `float64` comes from a pylab star-import earlier in the notebook.
dataframe = dataframe.applymap(lambda x: x.strip().lower() if not isinstance(x,float64) else x)
dataframe.head()
# The domain column looks to be a full URI instead of just the domain,
# so reduce it to just the network-location (netloc) part.
from urlparse import urlparse
dataframe['domain'] = dataframe['domain'].astype(str)
dataframe['domain'] = dataframe['domain'].apply(lambda x: "http://" + x)
dataframe['domain'] = dataframe['domain'].apply(lambda x: urlparse(x).netloc)
```
### Two columns that are a mistaken copy of each other?...
We also suspect that the 'inactive' column and the 'country' column are exactly the same, also why is there one row in the inactive column with a value of '2'?
<pre>
"Ahhh, what an awful dream. Ones and zeroes everywhere... and I thought I saw a two [shudder]."
-- Bender
"It was just a dream, Bender. There's no such thing as two".
-- Fry
</pre>
```
# Using numpy.corrcoef to compute the correlation coefficient matrix
# between the suspicious 'inactive' and 'country' columns.
np.corrcoef(dataframe["inactive"], dataframe["country"])
# Pandas also has a correlation method on its dataframe which has nicer output
dataframe.corr()
# Yeah, perfectly correlated, so looks like 'country'
# is just the 'inactive' column duplicated.
# So what happened here? Seems bizarre to have a replicated column.
```
#### Okay well lets try to get something out of this pile. We'd like to run some simple statistics to see what correlations the data might contain.
#### G-test is for goodness of fit to a distribution and for independence in contingency tables. It's related to chi-squared, multinomial and Fisher's exact test, please see http://en.wikipedia.org/wiki/G_test.
```
# The data hacking repository has a simple stats module we're going to use
import data_hacking.simple_stats as ss
# Spin up our g_test class (G-test of independence on contingency tables)
g_test = ss.GTest()
# Here we'd like to see how various exploits (description) are related to
# the ASN (Autonomous System Number) associated with the ip/domain.
(exploits, matches, cont_table) = g_test.highest_gtest_scores(
    dataframe['description'], dataframe['asn'], N=5, matches=5)
ax = exploits.T.plot(kind='bar', stacked=True)
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('ASN (Autonomous System Number)')
patches, labels = ax.get_legend_handles_labels()
ax.legend(patches, labels, loc='upper right')
# The plot below is showing the number of times a particular exploit was associated with an ASN.
# Interesting to see whether exploits are highly correlated to particular ASNs.
# Now we use g_test with the 'reverse=True' argument to display those exploits
# that do not have a high correlation with a particular ASN.
exploits, matches, cont_table = g_test.highest_gtest_scores(dataframe['description'],
    dataframe['asn'], N=7, reverse=True, min_volume=500, matches=15)
ax = exploits.T.plot(kind='bar', stacked=True)
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('ASN (Autonomous System Number)')
patches, labels = ax.get_legend_handles_labels()
ax.legend(patches, labels, loc='best')
# The plot below is showing exploits that aren't associated with any particular ASN.
# Interesting to see exploits that are spanning many ASNs.
# Same test again, this time against the domain instead of the ASN.
exploits, matches, cont_table = g_test.highest_gtest_scores(dataframe['description'],
    dataframe['domain'], N=5)
ax = exploits.T.plot(kind='bar', stacked=True) #, log=True)
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('Domain')
patches, labels = ax.get_legend_handles_labels()
ax.legend(patches, labels, loc='best')
# The Contingency Table below is just showing the counts of the number of times
# a particular exploit was associated with a TLD.
# Drilling down on one particular exploit
banker = dataframe[dataframe['description']=='trojan banker'] # Subset dataframe
exploits, matches, cont_table = g_test.highest_gtest_scores(banker['description'], banker['domain'], N=5)
import pprint
pprint.pprint(["Domain: %s Count: %d" % (domain,count) for domain,count in exploits.iloc[0].iteritems()])
```
### So switching gears, perhaps we'll look at date range, volume over time, etc.
Pandas also has reasonably good functionality for date/range processing and plotting.
```
# Add the proper timestamps to the dataframe replacing the old ones
# (the raw dates use '_' where ISO-8601 expects 'T').
dataframe['date'] = dataframe['date'].apply(lambda x: str(x).replace('_','T'))
dataframe['date'] = pd.to_datetime(dataframe['date'])
# Now prepare the data for plotting by pivoting on the
# description to create a new column (series) for each value.
# We're going to add a new column called value (needed for pivot). This
# is a bit dorky, but needed as the new columns that get created should
# really have a value in them, also we can use this as our value to sum over.
# NOTE(review): rows=/cols= is the long-removed pivot_table API; modern
# pandas uses index=/columns=.
subset = dataframe[['date','description']]
subset['count'] = 1
pivot = pd.pivot_table(subset, values='count', rows=['date'], cols=['description'], fill_value=0)
# Helper: by('year') returns a function mapping a timestamp to its year,
# so we can group the pivoted counts by (year, month).
by = lambda x: lambda y: getattr(y, x)
grouped = pivot.groupby([by('year'),by('month')]).sum()
# Only pull out the top 7 descriptions (exploit types)
topN = subset['description'].value_counts()[:7].index
grouped[topN].plot()
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('Date Submitted')
# The plot below shows the volume of particular exploits impacting new domains.
# Tracking the ebb and flow of exploits over time might be useful
# depending on the type of analysis you're doing.
# The rise and fall of the different exploits is intriguing but
# the taper at the end is concerning; let's look at total volume of
# new malicious domains coming into the MDL database.
total_mdl = dataframe['description']
total_mdl.index=dataframe['date']
total_agg = total_mdl.groupby([by('year'),by('month')]).count()
matplotlib.pyplot.figure()
total_agg.plot(label='New Domains in MDL Database')
pylab.ylabel('Total Exploits')
pylab.xlabel('Date Submitted')
matplotlib.pyplot.legend()
```
### That doesn't look good...
The plot above shows the total volume of ALL newly submitted domains. We see from the plot that the taper is a general overall effect due to a drop in new domain submissions into the MDL database. Given the recent anemic volume there might be another data source that has more active submissions.
Well the anemic volume issue aside we're going to carry on by looking at the correlations in volume over time. In other words are the volume of reported exploits closely related to the volume of other exploits...
### Correlations of Volume Over Time
<ul>
<li>**Prof. Farnsworth:** Behold! The Deathclock!
<li>**Leela:** Does it really work?
<li>**Prof. Farnsworth:** Well, it's occasionally off by a few seconds, what with "free will" and all.
</ul>
```
# Only pull out the top 20 descriptions (exploit types)
topN = subset['description'].value_counts()[:20].index
corr_df = grouped[topN].corr()
# Statsmodels has a correlation plot; we expect the diagonal to have perfect
# correlation (1.0), but any high score off the diagonal means that
# the volumes of different exploits are temporally correlated.
import statsmodels.api as sm
# NOTE(review): DataFrame.sort and as_matrix are removed in modern pandas
# (sort_index / to_numpy are the replacements).
corr_df.sort(axis=0, inplace=True) # Just sorting so exploit names are easy to find
corr_df.sort(axis=1, inplace=True)
corr_matrix = corr_df.as_matrix()
pylab.rcParams['figure.figsize'] = (8.0, 8.0)
sm.graphics.plot_corr(corr_matrix, xnames=corr_df.index.tolist())
plt.show()
```
#### Discussion of Correlation Matrix
* The two sets of 3x3 red blocks on the lower right make intuitive sense, Zeus config file, drop zone and trojan show almost perfect volume over time correlation.
```
pylab.rcParams['figure.figsize'] = (14.0, 3.0)
# Zeus v1 family: trojan / config file / drop zone volumes track each other.
print grouped[['zeus v1 trojan','zeus v1 config file','zeus v1 drop zone']].corr()
grouped[['zeus v1 trojan','zeus v1 config file','zeus v1 drop zone']].plot()
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('Date Submitted')
# Same view for the Zeus v2 family.
grouped[['zeus v2 trojan','zeus v2 config file','zeus v2 drop zone']].plot()
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('Date Submitted')
# Drilling down on the correlation between 'trojan' and 'phoenix exploit kit'
print grouped[['trojan','phoenix exploit kit']].corr()
grouped[['trojan','phoenix exploit kit']].plot()
pylab.ylabel('Exploit Occurrences')
pylab.xlabel('Date Submitted')
```
### Interesting? (shrug... maybe...)
Looking above we see that the generic 'trojan' label and the fairly specific 'phoenix exploit kit' have a reasonable volume over time correlation of .834 *(PearsonsR is the default for the corr() function; a score of 1.0 means perfectly correlated [Pearson's Correlation](http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient))*. So it certainly might be something to dive into depending on your particular interest, again the win here is that with a few lines of python code we can 'see' these kinds of relationships.
### Conclusions
So this exercise was an exploration of the dataset. At this point we have a good idea about what's in the dataset, what cleanup issues we might have and the overall quality of the dataset. We've run some simple correlative statistics and produced some nice plots. Most importantly we should have a good feel for whether this dataset is going to suit our needs for whatever use case we may have.
In the next exercise we're going to look at some syslog data. We'll take it up a notch by computing similarities with 'Banded MinHash', running a hierarchical clustering algorithm and exercising some popular supervised machine learning functionality from Scikit Learn http://scikit-learn.org/.
| github_jupyter |
```
import (
"encoding/csv"
"os"
"strconv"
"sort"
"math"
"fmt"
)
// loadData reads the iris data set from "iris.csv" in the working
// directory, skipping the header row. It returns the four numeric
// feature columns per row and the label from the fifth column.
func loadData() ([][]float64, []string, error) {
    file, err := os.Open("iris.csv")
    if err != nil {
        return nil, nil, err
    }
    defer file.Close()

    reader := csv.NewReader(file)
    reader.Comma = ','
    reader.LazyQuotes = true

    // Discard the header line.
    if _, err = reader.Read(); err != nil {
        return nil, nil, err
    }

    records, err := reader.ReadAll()
    if err != nil {
        return nil, nil, err
    }

    features := [][]float64{}
    labels := []string{}
    for _, record := range records {
        row := make([]float64, 4)
        for col, field := range record[:4] {
            value, perr := strconv.ParseFloat(field, 64)
            if perr != nil {
                return nil, nil, perr
            }
            row[col] = value
        }
        features = append(features, row)
        labels = append(labels, record[4])
    }
    return features, labels, nil
}
// Load the data set and abort on any read/parse failure.
X, Y, err := loadData()
if err != nil {
    panic(err)
}
// Deterministic 50/50 train/test split: even-indexed rows train,
// odd-indexed rows test. (No shuffling -- relies on the file order.)
var trainX, testX [][]float64
var trainY, testY []string
for i, _ := range X {
    if i%2 == 0 {
        trainX = append(trainX, X[i])
        trainY = append(trainY, Y[i])
    } else {
        testX = append(testX, X[i])
        testY = append(testY, Y[i])
    }
}
// KNN is a k-nearest-neighbours classifier: it stores the training
// feature vectors and their labels and predicts by majority vote
// among the k closest training points.
type KNN struct {
    k int          // number of neighbours consulted per prediction
    XX [][]float64 // training feature vectors
    Y []string     // training labels, aligned with XX
}
// distance returns the Euclidean distance between two equal-length
// feature vectors.
func distance(lhs, rhs []float64) float64 {
    total := 0.0
    for idx := range lhs {
        total += math.Pow(lhs[idx]-rhs[idx], 2)
    }
    return math.Sqrt(total)
}
// predict classifies each row of X by majority vote among the k
// training points nearest to it (Euclidean distance).
//
// Fix: when two labels tied on votes, the winner previously depended on
// Go's randomized map iteration order, making predictions
// non-deterministic run-to-run. Ties are now broken by the
// lexicographically smaller label.
func (knn *KNN) predict(X [][]float64) []string {
    results := []string{}
    for _, x := range X {
        // Rank every training point by its distance to x.
        type item struct {
            i int     // index into knn.XX / knn.Y
            f float64 // distance to x
        }
        var items []item
        for i, xx := range knn.XX {
            items = append(items, item {
                i: i,
                f: distance(x, xx),
            })
        }
        sort.Slice(items, func(i, j int) bool {
            return items[i].f < items[j].f
        })
        // Collect the labels of the k nearest neighbours.
        var labels []string
        for i := 0; i < knn.k; i++ {
            labels = append(labels, knn.Y[items[i].i])
        }
        // Count votes per label.
        founds := map[string]int{}
        for _, label := range labels {
            founds[label] += 1
        }
        type rank struct {
            i int    // vote count
            s string // label
        }
        var ranks []rank
        for k, v := range founds {
            ranks = append(ranks, rank {
                i: v,
                s: k,
            })
        }
        // Highest vote count first; equal counts fall back to label order
        // so the result is deterministic.
        sort.Slice(ranks, func(i, j int) bool {
            if ranks[i].i != ranks[j].i {
                return ranks[i].i > ranks[j].i
            }
            return ranks[i].s < ranks[j].s
        })
        results = append(results, ranks[0].s)
    }
    return results
}
// Fit is lazy for KNN: just store the training data with k = 8.
knn := KNN {
    k: 8,
    XX: trainX,
    Y: trainY,
}
// Score the classifier: fraction of test labels predicted correctly.
predicted := knn.predict(testX)
correct := 0
for i, _ := range predicted {
    if predicted[i] == testY[i] {
        correct += 1
    }
}
fmt.Printf("%f%%\n", float64(correct)/float64(len(predicted))*100)
```
| github_jupyter |
# Recommender - Proof of Concept
This notebook is to test functionality of recommendation functions.
# Libraries
```
# Notebook setup: inline plots plus autoreload so edits to comic_recs
# are picked up without restarting the kernel.
%matplotlib inline
%load_ext autoreload
%autoreload 2 #would be where you need to specify the files
#%aimport comic_recs
# Pyspark imports
import pyspark
from pyspark.sql import SparkSession
import sys
# Parent directory holds the comic_recs module.
sys.path.append('..')
# Model functions
import comic_recs as cr
# spark config: local-mode session with modest driver/executor memory.
# fix: appName previously said "movie recommendation" -- a copy/paste
# leftover; this notebook recommends comics.
spark = SparkSession \
    .builder \
    .appName("comic recommendation") \
    .config("spark.driver.maxResultSize", "1g") \
    .config("spark.driver.memory", "1g") \
    .config("spark.executor.memory", "4g") \
    .config("spark.master", "local[*]") \
    .getOrCreate()
```
## Data Prep
These data/modeling related tasks need to be prepared beforehand.
### 1. Retrieve comics list as PySpark dataframe
List of known comics titles.
```
# Known comic titles; cached since it is reused by every recommendation call.
comics_df = spark.read.json('support_data/comics.json')
comics_df.persist()
comics_df.show(5)
```
### 2. Retrieve training data
All comic titles existing users have bought or subscribed.
```
# ALS training data: titles existing users have bought or subscribed.
comics_sold = spark.read.json('raw_data/als_input_filtered.json')
comics_sold.persist()
comics_sold.show(5)
```
### 3. Set up model parameters
Parameters we have previously found through grid searching / cross-validation.
## Recommendations for New Users
Parameters as determined in our fitting process (see NB7)
```
# ALS hyperparameters chosen via the earlier grid search (see NB7).
model_params = {'maxIter': 10
                ,'rank': 5
                ,'regParam': 0.1
                ,'alpha': 100
                ,'seed': 1234
               }
```
### Create list of existing user preferences
E.g. "I currently read or like these comic books." Doesn't have to be exact match, but the closer to an actual title, the better. We are just doing simple wildcard matches on titles.
```
# Candidate reading lists tried during experimentation.
# NOTE(review): each assignment overwrites the previous one -- only the
# final list (['Paper Girls']) is actually used below. Comment out all
# but the list you want to test.
reading_list = ['Transformers', 'GI Joe', 'Y The Last Man', 'Saga', 'Avengers'
                ,'Paper Girls', 'Star Wars']
reading_list = ['Batman', 'Sherlock Holmes', 'Attack on Titan', 'Thor']
reading_list = ['AVengers', 'wolverine', 'phoenix', 'deadpool']
reading_list = ['Moon Knight']
reading_list = ['Paper Girls']
```
### Get Recommendations!
Use the above inputs and decide how many comics to input into recommendation function.
```
# Produce the top-10 recommendations for the reading list above.
recommendations = cr.make_comic_recommendations(reading_list=reading_list
                                                ,top_n=10
                                                ,comics_df=comics_df
                                                ,train_data=comics_sold
                                                ,model_params=model_params
                                                ,spark_instance=spark
                                                )
recommendations
# What if we just want the value of the first row?
recommendations.head(1)['comic_title'].values[0]
```
| github_jupyter |
Before we begin, let's execute the cell below to display information about the CUDA driver and GPUs running on the server by running the `nvidia-smi` command. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.
```
!nvidia-smi
```
## Learning objectives
The **goal** of this lab is to:
- Learn how to run the same code on both a multicore CPU and a GPU using the OpenMP Target programming model
- Understand the key directives and steps involved in making a sequential code parallel
We do not intend to cover:
- Optimization techniques in details
# OpenMP Directives
- OpenMP has been formed in 1997 to focus on vendor-neutral Shared Memory Parallelism.
- OpenMP 4.0 in 2013 expanded its focus beyond shared memory parallel computers including accelerators.
- The OpenMP 4.0 target construct provides the means to offload data and computation to accelerators.
Like OpenACC, OpenMP is directive based. Compiler directives appear as comments in your source code and are ignored by compilers unless you tell them otherwise - usually by specifying the appropriate compiler flag.
In this notebook we will be using the OpenMP target construct to offload data and computation to GPU. Multiple compilers are in development to support OpenMP offloading to NVIDIA GPUs. We will using NVIDIA HPC SDK compiler for this tutorial.
## OpenMP Syntax
```#pragma omp directive ```
**#pragma** in C/C++ is what's known as a "compiler hint." These are very similar to programmer comments, however, the compiler will actually read our pragmas. Pragmas are a way for the programmer to "guide" the compiler, without running the risk of damaging the code. If the compiler does not understand the pragma, it can ignore it, rather than throw a syntax error.
**omp** is an addition to our pragma, it is known as the “sentinel”. It specifies that this is an OpenMP pragma. Any non-OpenMP compiler will ignore this pragma.
**directives** are commands in OpenMP that will tell the compiler to do some action. For now, we will only use directives that allow the compiler to parallelize our code.
For beginners who are new to OpenMP directive, we will be introducing some terminologies and concepts before starting to add ```target``` directives to our code to offload onto GPU computation and data.
## OpenMP Fork-Join Model
OpenMP uses the fork-join model of parallel execution. All OpenMP programs begin as a single process: the master thread. The master thread executes sequentially until the first parallel region construct is encountered.
**FORK**: the master thread then creates a team of parallel threads.The statements in the program that are enclosed by the parallel region construct are then executed in parallel among the various team threads.
**JOIN**: When the team threads complete the statements in the parallel region construct, they synchronize and terminate, leaving only the master thread.
<img src="../images/openmp_fork_join.png">
## OpenMP Parallel Region
A parallel region is a block of code that will be executed by multiple threads. This is the fundamental OpenMP parallel construct. When a thread reaches a PARALLEL directive, it creates a team of threads and becomes the master of the team. The master is a member of that team. Starting from the beginning of this parallel region, the code is duplicated and all threads will execute that code redundantly.There is an implied barrier at the end of a parallel region. Only the master thread continues execution past this point
```cpp
//Include the header file
#include <omp.h>
main(int argc, char *argv[]) {
int nthreads;
/* Fork a team of threads*/
#pragma omp parallel
{
/* Obtain and print thread id */
printf("Hello World from thread = %d\n", omp_get_thread_num());
/* Only master thread does this */
if (omp_get_thread_num() == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and terminate */
}
```
<img src="../images/openmp_parallel_construct.png">
## OpenMP Data-sharing
In OpenMP, several constructs accept clauses that allow the user to control the data sharing. For example, you can use one of below clauses in a *Parallel* construct.
- `private`: Declares variables to be private to each thread in a team. Private copies of the variable are initialized from the original object when entering the region.
- `shared`: Shares variables among all the threads in a team.
- `default`: Enables you to affect the data-scope attributes of variables.
```cpp
#pragma omp parallel for default(shared) private(dx)
for (int i=0; i < N; i++){
    for (int j=0; j < N; j++){
        dx = a[i] + b[j];
    }
}
```
## OpenMP Work-sharing
As described before, the ```parallel``` construct creates a team of threads and execution continues redundantly on all threads of the team. Ideally we would want all threads within the team to work-share, i.e. split the work. A work-sharing construct divides the execution of the enclosed code region among the members of the team that encounter it. Work-sharing constructs do not launch new threads, but divide ("workshare") the iterations of the loop across the threads in the team. There is no implied barrier upon entry to a work-sharing construct, however there is an implied barrier at the end of a work-sharing construct.
There are multiple ways to allow worksharing, the code below makes use of ```for``` to divide the iteration of loop among threads.
```cpp
//Create a team of threads
#pragma omp parallel
{
//workshare this loop across those threads.
#pragma omp for
for (i=0; i < N; i++)
c[i] = a[i] + b[i];
} /* end of parallel region */
```
<img src="../images/openmp_parallelfor_construct.png">
## OpenMP Target Offloading
By now you should have got familiar with the OpenMP programming model. Now let us start introducing key directives and construct used to add GPU offloading.
### ```target ```
```target``` construct consists of a target directive and an execution region. ```target``` directive define a target region, which is a block of computation that operates within a distinct data environment and is intended to be offloaded onto a parallel computation device during execution ( GPU in our case). Data used within the region may be implicitly or explicitly mapped to the device. All of OpenMP is allowed within target regions, but only a subset will run well on GPUs.
The example below shows usage of target directive with implicitly mapped data
```cpp
while (iter < iter_max )
{
error = 0.0;
//Moves this region of code to the GPU and implicitly maps data.
#pragma omp target
{
#pragma omp parallel for reduction(max:error)
for( int j = 1; j < n-1; j++) {
ANew[j] = A [j-1] + A[j+1];
}
}
iter++;
}
```
### ```target data``` to explicitly map the data
Map a variable to/from the device.Map directive helps developer to explicitly define and reduce data copies. The ```target data```construct is used to mark such regions
```cpp
#pragma omp target map(map-type: list)
```
Example of mapping data directives are as follows:
- to (list)
- Allocates memory on the device and copies data in when entering the region, the values are not copied back
- from (list)
- Allocates memory on the device and copies the data to the host when exiting the region
- alloc (list)
- Allocates memory on the device. If the data is already present on the device a reference counter is incremented
```cpp
while (iter < iter_max )
{
error = 0.0;
//Moves this region of code to the GPU and explicitly maps data.
#pragma omp target data map(to:A[:n]) map(from:ANew[:n])
{
#pragma omp parallel for reduction(max:error)
for( int j = 1; j < n-1; j++) {
ANew[j] = A [j-1] + A[j+1];
}
}
iter++;
}
```
### ```teams``` directive
```teams``` directve creates a league of thread teams where the master thread of each team executes the region. Each of these master threads executes sequentially. Or in other words teams directive spawn 1 or more thread teams with the same number of threads. The execution continues on the master threads of each team (redundantly). There is no synchronization allowed between teams.
OpenMP calls that somewhere a team, which might be a thread on the CPU or maybe a CUDA threadblock or OpenCL workgroup. It will choose how many teams to create based on where you're running, only a few on a CPU (like 1 per CPU core) or lots on a GPU (1000's possibly). ```teams``` allow OpenMP code to scale from small CPUs to large GPUs because each team works completely independently of the other ```teams```.
<img src="../images/openmp_target_teams.png">
### ```distribute```
There's a good chance that we don't want the loop to be run redundantly in every master thread of ```teams``` though, that seems wasteful and potentially dangerous. With usage of ```distribute``` construct the iterations of the next loop are broken into groups that are “distributed” to the master threads of the teams. The iterations are distributed statically and there’s no guarantees about the order teams will execute. Also it does not generate parallelism/worksharing within the thread teams.
<img src="../images/openmp_target_distribute.png">
The example below of a simple stencil code shows the usage of ```distribute``` along with ```teams```:
<img src="../images/openmp_teams.png">
### Work sharing to improve parallelism
As shown in the image, only the master thread performs the computation, which is not optimal in the case of a GPU architecture. To solve this problem we will make use of work-sharing as we did before. When any team encounters a worksharing construct, the work inside the construct is divided among the members of the team, and executed cooperatively instead of being executed by every thread. There are many work sharing constructs defined; the one that we plan to use is:
```
#pragma omp parallel for
```
<img src="../images/openmp_teams_for.png">
## Atomic Construct
In the code you will also require one more construct which will help you in getting the right results. OpenMP atomic construct ensures that a particular variable is accessed and/or updated atomically to prevent indeterminate results and race conditions. In other words, it prevents one thread from stepping on the toes of other threads due to accessing a variable simultaneously, resulting in different results run-to-run. For example, if we want to count the number of elements that have a value greater than zero, we could write the following:
```cpp
if ( val > 0 )
{
#pragma omp atomic
{
cnt++;
}
}
```
Now, lets start modifying the original code and add the OpenMP directives. From the top menu, click on *File*, and *Open* `rdf.cpp` and `dcdread.h` from the current directory at `C/source_code/openmp` directory. Remember to **SAVE** your code after changes, before running below cells.
### Compile and Run for Multicore
Having added OpenMP directives, let us compile the code. We will be using NVIDIA HPC SDK compiler for this exercise. The flags used for enabling OpenMP target offloading are as follows:
<!--
**-fopenmp** : This flag will give tell compiler to parse and act on OpenMP directive.
**-fopenmp-targets** : This flag allows us to compile our code for a specific target parallel hardware. Without this flag, the code will be compiled for multicore execution.
-->
`-mp=gpu|multicore` : Select the target device for all parallel programming paradigms used (OpenACC, OpenMP, Standard Languages)
- `gpu` Globally set the target device to an NVIDIA GPU
- `multicore` Globally set the target device to the host CPU
**NOTE:** `-Minfo=mp` enables OpenMP information.
```
#Compile the code for muticore
!cd ../../source_code/openmp && nvc++ -mp=multicore -Minfo=mp -I/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/cuda/11.2/include -o rdf rdf.cpp
```
Inspect the compiler feedback (you should get a similar output as below) you can see from *Line 174* that it is generating a multicore code `174, Generating Multicore code`.
<img src="../images/openmp_feedback_multicore.png">
Make sure to validate the output by running the executable and validate the output.
```
#Run the multicore code and check the output
!cd ../../source_code/openmp && ./rdf && cat Pair_entropy.dat
```
The output should be the following:
```
s2 value is -2.43191
s2bond value is -3.87014
```
```
#profile and see output of nvptx
!cd ../../source_code/openmp && nsys profile -t nvtx --stats=true --force-overwrite true -o rdf_multicore ./rdf
```
Let's checkout the profiler's report. [Download the profiler output](../../source_code/openmp/rdf_multicore.qdrep) and open it via the GUI. Have a look at the example expected profiler report below:
<img src="../images/openmp_multicore.png">
Feel free to checkout the [solution](../../source_code/openmp/SOLUTION/rdf_offload.cpp) to help you understand better.
### Compile and Run for an NVIDIA GPU
Without changing the code now let us try to recompile the code for NVIDIA GPU and rerun.
The only difference is that now we pass `gpu` value to the `-mp` compiler option.`-mp=gpu`. **Understand and analyze** the solution present at:
[RDF Code](../../source_code/openmp/SOLUTION/rdf_offload.cpp)
[File Reader](../../source_code/openmp/SOLUTION/dcdread.h)
Open the downloaded files for inspection.
```
#compile for Tesla GPU
!cd ../../source_code/openmp && nvc++ -mp=gpu -Minfo=mp -o rdf rdf.cpp
```
Inspect the compiler feedback (you should get a similar output as below) and you can see below:
- *Line 86* shows variables mapped to the device
- *Line 174* shows the GPU kernel is generated `Generating "nvkernel__Z8pair_gpuPKdS0_S0_Pjiidddi_F1L174_1" GPU kernel`
<img src="../images/openmp_feedback.png">
Make sure to validate the output by running the executable and validate the output.
```
#Run on Nvidia GPU and check the output
!cd ../../source_code/openmp && ./rdf && cat Pair_entropy.dat
```
The output should be the following:
```
s2 value is -2.43191
s2bond value is -3.87014
```
```
#profile and see output of nvptx
!cd ../../source_code/openmp && nsys profile -t nvtx,cuda --stats=true --force-overwrite true -o rdf_gpu ./rdf
```
Let's checkout the profiler's report. [Download the profiler output](../../source_code/openmp/rdf_gpu.qdrep) and open it via the GUI. Have a look at the example expected profiler report below:
<img src="../images/openmp_gpu.png">
If you expand the CUDA row (Timeline view), you can see memory movements as well as Kernels. Checkout the NVTX row and compare the execution time for the `Pair_Calculation` for the multicore version and the GPU offload version. In the *example screenshot*, we were able to reduce the timing from 1.63 seconds to 69.54 milliseconds.
# OpenMP Analysis
**Usage Scenarios**
- Legacy codes with sizeable codebase needs to be ported to GPUs with minimal code changes to sequential code.
- Developers want to see if the code structure favors GPU SIMD/SIMT style or as we say test the waters before moving a large piece of code to a GPU.
**Limitations/Constraints**
- Directive based programming models like OpenMP depend on a compiler to understand and convert your sequential code to CUDA constructs. OpenMP compilers with target offload support are evolving, and they cannot yet match the best performance that using CUDA C constructs directly can give. Things like controlling execution at warp level or limiting the register counts are some of the examples.
**Which Compilers Support OpenMP on GPU?**
As of March 2020 here are the compilers that support OpenMP on GPU:
| Compiler | Latest Version | Maintained by | Full or Partial Support |
| --- | --- | --- | --- |
| GCC | 10 | Mentor Graphics | 4.5 partial spec supported |
| CCE| latest | Cray | 4.5 partial spec supported |
| XL | latest | IBM | 4.5 partial spec supported |
| Clang | 9.0 | Community | 4.5 partial spec supported |
| HPC SDK | 21.3 | NVIDIA HPC SDK | 5.0 spec supported |
## Post-Lab Summary
If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below.
```
%%bash
cd ..
rm -f nways_files.zip
zip -r nways_files.zip *
```
**After** executing the above zip command, you should be able to download the zip file [here](../nways_files.zip). Let us now go back to parallelizing our code using other approaches.
<!--
**IMPORTANT**: If you would like to continue and optimize this application further with OpenMP, please click on the **NEXT** button, otherwise click on **HOME** to go back to the main notebook for *N ways of GPU programming for MD* code.
-->
**IMPORTANT**: Please click on **HOME** to go back to the main notebook for *N ways of GPU programming for MD* code.
-----
# <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../../nways_MD_start.ipynb>HOME</a></p>
-----
<!-- <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../../nways_MD_start.ipynb>HOME</a> <span style="float:center"> <a href=nways_openmp_opt.ipynb>NEXT</a></span> </p>
-->
# Links and Resources
[OpenMP Programming Model](https://computing.llnl.gov/tutorials/openMP/)
[OpenMP Target Directive](https://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf)
[NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/)
**NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems).
Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.
---
## Licensing
This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
| github_jupyter |
# Figure 02: Ground state
In this notebook, we show how the data in Figure 2 of the publication
[Beg *et al.* Ground state search, hysteretic behaviour and reversal mechanism of skyrmionic textures in confined helimagnetic nanostructures. *Scientific Reports* **5**, 17137 (2015).](https://doi.org/10.1038/s41598-019-44462-2)
can be computed.
The diagram shows the lowest energy configurations that have been obtained as a function of disk diameter $d$ and applied magnetic field $H$. The two configurations observed are
- (i) a somewhat uniform state with significant twisting towards the boundaries of the disk - this is called an incomplete Skyrmion (iSk) in the publication. The other configuration is
- (ii) an isolated Skyrmion (Sk).
We are going to simulate two points from the parameter space, as shown in the following figure:
<img src="phase-diagram.webp" width=400px>
More precisely, we are going to simulate:
- $(d, \mu_{0}H) = (80\,\text{nm}, 0.2\,\text{T})$ - red dot
- $(d, \mu_{0}H) = (160\,\text{nm}, 0.3\,\text{T})$ - green triangle
The first value is the disk diameter $d$ (thickness is always $10\,\text{nm}$) and the second value is an external magnetic field $H$ applied in the out-of-plane direction.
We are going to relax the system starting from the uniform state. The function to simulate a single point in the parameter space is `parameter_space_point`:
```
import oommfc as oc
import discretisedfield as df
%matplotlib inline
def parameter_space_point(d, B, initial_state):
    """Relax one point of the (diameter, field) parameter space.

    Parameters
    ----------
    d : float
        Disk diameter in metres (disk thickness is fixed at 10 nm).
    B : float
        Out-of-plane applied field in Tesla.
    initial_state : str
        Either 'skyrmion' (core pointing -z inside radius d/4, +z outside)
        or 'uniform' (magnetisation along +z everywhere).

    Returns
    -------
    oommfc.System
        The relaxed micromagnetic system after energy minimisation.

    Raises
    ------
    ValueError
        If ``initial_state`` is not 'skyrmion' or 'uniform'.
    """
    thickness = 10e-9
    cell = (2.5e-9, 2.5e-9, 2.5e-9)
    # Cuboid mesh enclosing the disk; the disk shape itself is carved out
    # below by zeroing Ms outside the radius.
    p1 = (-d/2, -d/2, -thickness/2)
    p2 = (d/2, d/2, thickness/2)
    mesh = oc.Mesh(p1=p1, p2=p2, cell=cell)

    # Material parameters used throughout this notebook
    # (presumably the FeGe values from the publication — confirm).
    D = 1.58e-3
    Ms = 3.84e5
    A = 8.78e-12

    def Ms_fun(pos):
        # Zero saturation magnetisation outside radius d/2 turns the
        # cuboid mesh into a disk geometry.
        x, y, z = pos
        if x**2 + y**2 <= (d/2)**2:
            return Ms
        return 0

    def m_fun(pos):
        # Skyrmion-like seed: core (radius d/4) along -z, rim along +z.
        x, y, z = pos
        if x**2 + y**2 <= (d/4)**2:
            return (0, 0, -1)
        return (0, 0, 1)

    if initial_state == 'skyrmion':
        value = m_fun
    elif initial_state == 'uniform':
        value = (0, 0, 1)
    else:
        # Fail early with a clear message instead of a NameError on
        # `value` further down.
        raise ValueError("initial_state must be 'skyrmion' or 'uniform', "
                         "got {!r}".format(initial_state))

    system = oc.System(name='parameter-space-point')
    system.hamiltonian = oc.Exchange(A=A) + oc.DMI(D=D, crystalclass='T') + oc.Demag() + \
        oc.Zeeman(H=(0, 0, B/oc.consts.mu0))
    system.m = df.Field(mesh, dim=3, value=value, norm=Ms_fun)

    md = oc.MinDriver()
    md.drive(system, overwrite=True)
    return system
```
## Incomplete skyrmion
We now relax the magnetic configuration for the red dot configuration:
```
system1 = parameter_space_point(80e-9, 0.2, initial_state='uniform')
```
The magnetisation is
```
system1.m.plane(z=0, n=(20, 20)).k3d_vectors(color_field=system1.m.z, head_size=20)
system1.m.plane(z=0, n=(20, 20)).mpl()
# scan magnetisation along line (as in Published figure 2b)
import numpy as np
import pylab
mz_data = []
xs = []
for x in np.linspace(-40e-9, 40e-9, 15):
mz = system1.m([x, 0, 0e-9])[2]
mz_data.append(mz)
xs.append(x)
# plot the scanned data
pylab.plot(xs, mz_data, '-')
pylab.xlabel('x [m]')
pylab.ylabel('Mz [A/m]');
pylab.grid()
```
## Isolated skyrmion
For the second point, the disk diametre is greater, and a skyrmion "fits" into the geometry:
```
system2 = parameter_space_point(160e-9, 0.3, initial_state='skyrmion')
system2.m.plane(z=0, n=(30, 30)).k3d_vectors(color_field=system2.m.z, head_size=40)
system2.m.plane(z=0, n=(20, 20)).mpl()
# scan magnetisation along line
import numpy as np
import pylab
mz_data = []
xs = []
for x in np.linspace(-80e-9, 80e-9, 35):
mz = system2.m([x, 0, 0e-9])[2]
mz_data.append(mz)
xs.append(x)
# plot the scanned data
pylab.plot(xs, mz_data, '-')
pylab.xlabel('x [m]')
pylab.ylabel('Mz [A/m]');
pylab.grid()
```
# More detailed discussion
Please see [figure-02-ground-state-more-details.ipynb](figure-02-ground-state-more-details.ipynb) for a more detailed discussion, including meta stable configurations and computations of the total energy.
| github_jupyter |
# Predicting life of components: A simple Probabilistic Crack propagation example
Predicting the life of a component that is prone to cracking is an age-old problem that has been studied ad-nauseam by the [fracture mechanics community](https://www.google.com/search?tbm=bks&q=fracture+mechanics). [Crack propagation models](https://en.wikipedia.org/wiki/Fracture_mechanics) reside at the core of Prognostics and Health Management (PHM) solutions for engineering systems and the aptly titled book [Prognostics and Health Management of Engineering Systems: An Introduction](https://books.google.com/books?id=pv9RDQAAQBAJ&lpg=PR3&dq=prognostics%20and%20health%20management%20of%20engineering%20systems&pg=PR3#v=onepage&q=prognostics%20and%20health%20management%20of%20engineering%20systems&f=false) provides a great example of how real world data is used to calibrate engineering models, turning them into Digital Twins. The following example is the most straight forward application of combining probabilistic learning techniques and engineering domain models.
>Fabio will add code and example here...
>section prone to tl;dr
The most common form of describing a crack propagation is through the Paris Model for fatigue crack growth, which describes the rate of growth $da/dN$ as a function of crack size $a$ and the stress intensity $\Delta\sigma\sqrt{\pi a}$ related to the loading cycle.
$\frac{da}{dN}=C(\Delta\sigma\sqrt{\pi a})^m$
Which a very clever engineer can integrate to arrive at an analytical formulation for the size of a crack as a function of the loading cycle at application:
$a(N)=[NC(1-\frac{m}2)(\Delta\sigma\sqrt{\pi})^m+a_0^{1-\frac{m}2}]^\frac2{2-m}$
The parameters $C$ and $m$ are then left to be calibrated at each application, given the presence of crack size data. Such data is usually obtained during maintenance and inspections of engineering systems. Luckily enough, the [PHM book by Kim, An and Choi](https://books.google.com/books?id=pv9RDQAAQBAJ&lpg=PR3&dq=prognostics%20and%20health%20management%20of%20engineering%20systems&pg=PR3#v=onepage&q=prognostics%20and%20health%20management%20of%20engineering%20systems&f=false) provides us with such a sample dataset. With that in hand, a probabilistic calibration of $C$ and $m$ becomes simple with Tensorflow Probability.
First we need to set up an environment to do so. At BHGE Digital we leverage our [Depend-on-Docker](https://github.com/bhgedigital/depend-on-docker) automation project to wrap the execution of python code in Docker containers. A sample of such automation is available [here](https://github.com/bhgedigital/bayesian_calibration), where a complete version of the following code is available.
Then, within the python environment we need to import some libraries:
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
import pandas as pd
tfd = tfp.distributions
tfb = tfp.bijectors
%load_ext autoreload
%autoreload 2
%matplotlib inline
from tensorflow_probability.python.mcmc import util as mcmc_util
import time
import math
```
### Setting up the data
Setting up the data for the calibration comes next. Here we leverage the dataset provided in the Table 4.2 of the [PHM book by Kim, An and Choi](https://books.google.com/books?id=pv9RDQAAQBAJ&lpg=PR3&dq=prognostics%20and%20health%20management%20of%20engineering%20systems&pg=PR3#v=onepage&q=prognostics%20and%20health%20management%20of%20engineering%20systems&f=false):
```
# true values of the parameters
t = np.arange(0,1600, 100) #cycles
y = [0.0100,0.0109,0.0101,0.0107,0.0110,0.0123,0.0099,0.0113,
0.0132,0.0138,0.0148,0.0156,0.0155,0.0141,0.0169,0.0168] # measured crack size data
```
### Priors
With the data set ready, we now define the prior distributions for the calibration variables. In a real application, these priors can be informed by a subject matter expert opinion. For this case, we will assume that both $C$ and $m$ are expected to follow Gaussian distributions.
```
prio_par_C = [-23.0, 1.1] # [location, scale] for Normal Prior
prio_par_m = [4.0, 0.2] # [location, scale] for Normal Prior
rv_m = tfd.Normal(loc = 0.0, scale = 1.0, name = 'm_norm') # Random variable m definition
rv_C = tfd.Normal(loc = 0.0, scale = 1.0, name = 'C_norm') # Random variable logC definition
```
### Log-prob function
We have defined external parameters and Standard Normal distribution for both variables, just to sample from a normalized space. Therefore, we will need to de-normalize both random variables when computing the crack model.
Now we define the joint log probability for the random variables being calibrated and the associated crack model.
```
def joint_logprob(cycles, observations, y0, C_norm, m_norm):
    """Joint log-probability of the normalised Paris-law parameters and the data.

    Parameters
    ----------
    cycles : float32 tensor
        Load-cycle counts at which crack sizes were measured.
    observations : float32 tensor
        Measured crack sizes corresponding to ``cycles``.
    y0 : scalar float32 tensor
        Initial crack size a0 (first measurement).
    C_norm, m_norm : scalar tensors
        Paris-law parameters log(C) and m in the standard-normal sampling
        space; they are de-normalised below using the module-level priors
        ``prio_par_C`` / ``prio_par_m``.

    Returns
    -------
    Scalar tensor: prior log-probs of C_norm and m_norm (module-level
    ``rv_C``/``rv_m``) plus the LogNormal observation log-likelihood.
    """
    # Some constants
    dsig = 75.0  # stress range (Delta sigma) in the stress-intensity term
    B = tf.constant(dsig*math.sqrt(math.pi), tf.float32)
    # Computing m and logC on original space.
    # NOTE(review): tf.sqrt(prio_par_*[1]) treats the second prior entry as
    # a *variance*, while the comments at its definition call it a "scale" —
    # confirm which is intended.
    C = C_norm*tf.sqrt(prio_par_C[1]) + prio_par_C[0]
    m = m_norm*tf.sqrt(prio_par_m[1]) + prio_par_m[0]
    # Crack Propagation model - compute in the log space (log of the
    # closed-form a(N) solution of the Paris equation).
    y_model = (2.0/(2.0-m))*tf.log(cycles*tf.exp(C)*(1-m/2.0)*tf.pow(B,m) + tf.pow(y0, 1-m/2.0) )
    # Observation likelihood: LogNormal centred on the model prediction;
    # Independent sums the per-observation log-probs into one scalar.
    rv_model = tfd.Independent(tfd.LogNormal(loc = y_model, scale = 0.1),
                               reinterpreted_batch_ndims=1, name = 'model')
    # Sum of logProbabilities
    sum_log_prob = rv_C.log_prob(C_norm) + rv_m.log_prob(m_norm) + rv_model.log_prob(observations)
    return sum_log_prob
```
### Sampler
Finally, it is time to set up the sampler and run a Tensorflow session:
```
# Number of samples and burnin for the MCMC sampler
samples = 12000
burnin = 8000
# Initial state for the HMC
initial_state = [0.0,0.0]
# Converting the data into tensors
cycles = tf.convert_to_tensor(t,tf.float32)
observations = tf.convert_to_tensor(y,tf.float32)
y0 = tf.convert_to_tensor(y[0], tf.float32)
# Setting up a target posterior for our joint logprobability
unormalized_target_posterior= lambda *args: joint_logprob(cycles, observations, y0, *args)
# And finally setting up the mcmc sampler
[C_samples,m_samples], kernel_results = tfp.mcmc.sample_chain(num_results= samples, num_burnin_steps= burnin,
current_state=initial_state,
kernel= tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unormalized_target_posterior,
step_size = 0.06,
num_leapfrog_steps=6))
# Tracking the acceptance rate for the sampled chain
acceptance_rate = tf.reduce_mean(tf.to_float(kernel_results.is_accepted))
# Actually running the sampler
with tf.Session() as sess:
[C_samples_, m_samples_, acceptance_rate_] = sess.run([C_samples, m_samples, acceptance_rate])
# Some initial results
print('acceptance_rate:', acceptance_rate_)
```
### Plotting Results
If everything has gone according to plan, at this point we will see an acceptance rate of the sampler of around 60%. Pretty good for our first Bayesian hierarchical crack propagation model. A key metric for the HMC sampler is the sampled chains themselves, which should look well mixed and not like a correlated random walk. In this case, our sampler did pretty well:
```
# plotting the mcmc chains
plt.figure(figsize=(20,10))
plt.plot(np.arange(samples), C_samples_)
plt.title('C samples',fontsize=20)
plt.figure(figsize=(20,10))
plt.plot(np.arange(samples), m_samples_)
plt.title('m samples',fontsize=20)
```
If we collect our sample and rescale them to the original space, we can get some summary statistics on the posterior estimates of the calibrated parameters $\log(C)$ and $m$ and take a look at the distributions:
```
# Converting to proper scale
C_samples_scale = C_samples_*np.sqrt(prio_par_C[1]) + prio_par_C[0]
m_samples_scale = m_samples_*np.sqrt(prio_par_m[1]) + prio_par_m[0]
df = pd.DataFrame(np.concatenate([m_samples_scale[:,None], C_samples_scale[:,None]], axis = 1), columns = ['m', 'logC'])
pd.plotting.scatter_matrix(df,figsize=(7.2,7.2))
df.describe(percentiles=[.05, .95])
```
### Sampling the Posterior for Prognostic
And now for the final act, we shall define a posterior function for our probabilistic crack propagation model, in order to finally make the prognostic:
```
def posterior(C_samples, m_samples, time):
    """Draw crack-size trajectories from the posterior predictive model.

    Parameters
    ----------
    C_samples, m_samples : 1-D numpy arrays
        MCMC samples of log(C) and m, already rescaled to the original
        parameter space.
    time : 1-D numpy array
        Cycle counts at which to evaluate the crack-growth model.

    Returns
    -------
    numpy array of shape (1, len(time), len(C_samples)) — one LogNormal
    draw per (time, posterior sample) pair; callers index ``[0]``.

    Notes
    -----
    Relies on the module-level tensor ``y0`` (initial crack size) and on
    ``tf``/``tfd`` imported at the top of the notebook (TF1-style graph API).
    """
    n_s = len(C_samples)    # NOTE(review): unused
    n_inputs = len(time)    # NOTE(review): unused
    # Some Constants
    dsig = 75.0  # stress range; must match the value used in joint_logprob
    B = tf.constant(dsig*math.sqrt(math.pi), tf.float32)
    # Crack Propagation model - compute in the log space; the [None, :] /
    # [:, None] indexing broadcasts to a (time, sample) grid.
    y_model = (2.0/(2.0-m_samples[None,:]))*tf.log(time[:,None]*tf.exp(C_samples[None,:])*(1-m_samples[None,:]/2.0)*tf.pow(B,m_samples[None,:]) +
                                                   tf.pow(y0, 1-m_samples[None,:]/2.0))
    #y_model = tf.convert_to_tensor(y_model, tf.float32)
    rv_model = tfd.Independent(tfd.LogNormal(loc = y_model, scale = 0.1),
                               reinterpreted_batch_ndims=1, name = 'model')
    samples = rv_model.sample(1)
    # TF1-style session evaluation of the sampled tensor.
    with tf.Session() as sess:
        samples_ = sess.run(samples)
    return samples_
time = np.arange(0,3500,100)
y_samples = posterior(C_samples_scale, m_samples_scale, time)[0]
print(y_samples.shape)
lower_per = np.percentile(y_samples,2.5, axis = 1)
upper_per = np.percentile(y_samples,97.5, axis = 1)
plt.figure(figsize =(20,10))
plt.plot(time, np.mean(y_samples,axis=1), 'g', label = 'mean')
plt.plot(t,y,'kx', label = 'Data')
plt.fill_between(time, lower_per, upper_per, label = '95% quartile region', alpha = 0.3)
plt.xlabel('Cycles')
plt.ylabel('Crack size ')
plt.hlines(0.05, np.min(time), np.max(time), linestyles = '--', label = 'threshold')
plt.legend()
```
| github_jupyter |
# Table of Contents
<p><div class="lev1 toc-item"><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1 </span>Introduction</a></div><div class="lev1 toc-item"><a href="#Example" data-toc-modified-id="Example-2"><span class="toc-item-num">2 </span>Example</a></div><div class="lev1 toc-item"><a href="#Data-structures" data-toc-modified-id="Data-structures-3"><span class="toc-item-num">3 </span>Data structures</a></div><div class="lev2 toc-item"><a href="#Series" data-toc-modified-id="Series-31"><span class="toc-item-num">3.1 </span>Series</a></div><div class="lev3 toc-item"><a href="#Attributes-of-a-Series:-index-and-values" data-toc-modified-id="Attributes-of-a-Series:-index-and-values-311"><span class="toc-item-num">3.1.1 </span>Attributes of a Series: <code>index</code> and <code>values</code></a></div><div class="lev2 toc-item"><a href="#DataFrames:-Multi-dimensional-Data" data-toc-modified-id="DataFrames:-Multi-dimensional-Data-32"><span class="toc-item-num">3.2 </span>DataFrames: Multi-dimensional Data</a></div><div class="lev3 toc-item"><a href="#Attributes-of-the-DataFrame" data-toc-modified-id="Attributes-of-the-DataFrame-321"><span class="toc-item-num">3.2.1 </span>Attributes of the DataFrame</a></div><div class="lev1 toc-item"><a href="#Basic-operations-on-Series/Dataframes" data-toc-modified-id="Basic-operations-on-Series/Dataframes-4"><span class="toc-item-num">4 </span>Basic operations on Series/Dataframes</a></div><div class="lev3 toc-item"><a href="#Elementwise-operations-(like-numpy)" data-toc-modified-id="Elementwise-operations-(like-numpy)-401"><span class="toc-item-num">4.0.1 </span>Elementwise-operations (like numpy)</a></div><div class="lev3 toc-item"><a href="#Alignment!-(unlike-numpy)" data-toc-modified-id="Alignment!-(unlike-numpy)-402"><span class="toc-item-num">4.0.2 </span>Alignment! 
(unlike numpy)</a></div><div class="lev3 toc-item"><a href="#Reductions-(like-numpy)" data-toc-modified-id="Reductions-(like-numpy)-403"><span class="toc-item-num">4.0.3 </span>Reductions (like numpy)</a></div><div class="lev3 toc-item"><a href="#Some-other-useful-methods" data-toc-modified-id="Some-other-useful-methods-404"><span class="toc-item-num">4.0.4 </span>Some other useful methods</a></div><div class="lev2 toc-item"><a href="#Importing-and-exporting-data" data-toc-modified-id="Importing-and-exporting-data-41"><span class="toc-item-num">4.1 </span>Importing and exporting data</a></div><div class="lev2 toc-item"><a href="#Acknowledgement" data-toc-modified-id="Acknowledgement-42"><span class="toc-item-num">4.2 </span>Acknowledgement</a></div>
---
# Introduction
* Jupyter notebook (document and framework)
* Run user interface tour
* Test import and clear output
```
import pandas as pd
%pylab inline
try:
import seaborn # see http://seaborn.pydata.org/tutorial.html
except ImportError:
pass
```
# Example
```
df = pd.read_csv("data/titanic.csv") # or https://raw.githubusercontent.com/AlessandroChecco/pandas-tutorial-solved/master/data/airbase_data.csv
# or df = pd.read_csv('http://pastebin.com/raw/H67fnXSM')
df.head()
```
Starting from reading this dataset, to answering questions about this data in a few lines of code:
**What is the age distribution of the passengers?**
```
df['Age'].hist();
```
**How does the survival rate of the passengers differ between sexes?**
```
df.groupby('Sex')[['Survived']].mean()
```
**Or how does it differ between the different classes?**
```
df.groupby('Pclass').mean().plot.bar(y='Survived',rot=0);
```
**Are young people more likely to survive?**
```
df['Survived'].mean()
df25 = df[df['Age'] <= 25]
df25['Survived'].mean()
```
All the needed functionality for the above examples will be explained throughout this tutorial.
# Data structures
Pandas provides two fundamental data objects, for 1D (``Series``) and 2D data (``DataFrame``).
## Series
A Series is a basic holder for **one-dimensional labeled data**. It can be created much as a NumPy array is created:
```
s = pd.Series([0.1, 0.2, 0.3, 0.4])
s
```
### Attributes of a Series: `index` and `values`
The series has a built-in concept of an **index**, which by default is the numbers *0* through *N - 1*
```
s.index
```
You can access the underlying numpy array representation with the `.values` attribute:
```
s.values
```
We can access series values via the index, just like for NumPy arrays:
```
s[0]
```
Unlike the NumPy array, though, this index can be something other than integers:
```
s2 = pd.Series(np.arange(4), index=['a', 'b', 'c', 'd'])
s2
s2['c']
```
In this way, a ``Series`` object can be thought of as similar to an ordered dictionary mapping one typed value to another typed value.
In fact, it's possible to construct a series directly from a Python dictionary:
```
pop_dict = {'Germany': 81.3,
'Belgium': 11.3,
'France': 64.3,
'United Kingdom': 64.9,
'Netherlands': 16.9}
population = pd.Series(pop_dict)
population
```
We can index the populations like a dict as expected:
```
population['France']
```
but with the power of numpy arrays:
```
population * 1000
```
## DataFrames: Multi-dimensional Data
A DataFrame is a **tablular data structure** (multi-dimensional object to hold labeled data) comprised of rows and columns, akin to a spreadsheet, database table, or R's data.frame object. You can think of it as multiple Series object which share the same index.
<img src="img/dataframe.png" width=110%>
One of the most common ways of creating a dataframe is from a dictionary of arrays or lists.
Note that in the IPython notebook, the dataframe will display in a rich HTML view:
```
data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
'population': [11.3, 64.3, 81.3, 16.9, 64.9],
'area': [30510, 671308, 357050, 41526, 244820],
'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']}
countries = pd.DataFrame(data)
countries
```
### Attributes of the DataFrame
A DataFrame has besides a `index` attribute, also a `columns` attribute:
```
countries.index
countries.columns
```
To check the data types of the different columns:
```
countries.dtypes
```
An overview of that information can be given with the `info()` method:
```
countries.info()
```
Also a DataFrame has a `values` attribute, but attention: when you have heterogeneous data, all values will be upcasted:
```
countries.values
```
If we don't like what the index looks like, we can reset it and set one of our columns:
```
countries = countries.set_index('country')
countries
```
To access a Series representing a column in the data, use typical indexing syntax:
```
countries['area']
```
# Basic operations on Series/Dataframes
As you play around with DataFrames, you'll notice that many operations which work on NumPy arrays will also work on dataframes.
```
population = pd.Series({'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3,
'United Kingdom': 64.9, 'Netherlands': 16.9})
countries = pd.DataFrame({'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
'population': [11.3, 64.3, 81.3, 16.9, 64.9],
'area': [30510, 671308, 357050, 41526, 244820],
'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']})
```
### Elementwise-operations (like numpy)
Just like with numpy arrays, many operations are element-wise:
```
population / 100
countries['population'] / countries['area']
```
### Alignment! (unlike numpy)
Only, pay attention to **alignment**: operations between series will align on the index:
```
s1 = population[['Belgium', 'France']]
s2 = population[['France', 'Germany']]
s1
s2
s1 + s2
```
### Reductions (like numpy)
The average population number:
```
population.mean()
```
The minimum area:
```
countries['area'].min()
```
For dataframes, often only the numeric columns are included in the result:
```
countries.median()
```
<div class="alert alert-success">
<b>EXERCISE</b>: Calculate the population numbers relative to Belgium
</div>
<div class="alert alert-success">
<b>EXERCISE</b>: Calculate the population density for each country and add this as a new column to the dataframe.
</div>
### Some other useful methods
Sorting the rows of the DataFrame according to the values in a column:
```
countries.sort_values('density', ascending=False)
```
One useful method to use is the ``describe`` method, which computes summary statistics for each column:
```
countries.describe()
```
The `plot` method can be used to quickly visualize the data in different ways:
```
countries.plot();
```
However, for this dataset, it does not say that much:
```
countries['population'].plot(kind='bar',rot=0);
```
You can play with the `kind` keyword: 'line', 'bar', 'hist', 'density', 'area', 'pie', 'scatter', 'hexbin'
## Importing and exporting data
A wide range of input/output formats are natively supported by pandas:
* CSV, text
* SQL database
* Excel
* HDF5
* json
* html
* pickle
* ...
```
pd.read
countries.to
```
## Acknowledgement
> *© 2015, Stijn Van Hoey and Joris Van den Bossche. Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
> This notebook is partly based on material of Jake Vanderplas (https://github.com/jakevdp/OsloWorkshop2014).
---
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from PIL import Image
import os
from pathlib import Path
```
# Read `png` file and put in `numpy` array
## Get file name
```
image_directory = Path.cwd().joinpath("images")
test_file = list(image_directory.glob("*.png"))[0]
print(test_file)
```
## Read image and convert to numpy array
```
pil_img = Image.open(test_file)
print("PIL info")
print("size:", pil_img.size)
print("mode:", pil_img.mode)
print("")
np_img = np.array(pil_img)
print("numpy array info")
print("shape:", np_img.shape)
print("dtype:", np_img.dtype)
print("min, max:", np.min(np_img), np.max(np_img))
```
## Show images
### Numpy array
```
fig, ax = plt.subplots()
ax.imshow(np_img, interpolation=None, vmin=0, vmax=255);
```
### PIL image
```
fig, ax = plt.subplots()
ax.imshow(pil_img);
pil_img
```
# Draw border with slice object
```
def roi_border(img, roi, border_value=0, border_width=1):
    """
    Draw a border on an image showing the extent of a region-of-interest.

    The image is modified in place: the pixels on the ROI boundary are
    overwritten with ``border_value``, with the border extending
    ``border_width`` pixels toward the *interior* of the ROI.

    Parameters
    ----------
    img : 2D numpy array (grayscale) or 3D [:, :, 3] numpy array (color)
        Image on which to draw the border.
    roi : tuple of two slice objects (row_slice, col_slice)
        Region-of-interest that defines the border.
    border_value : int, float, or 3-element array-like, optional
        Gray level / color written into the border pixels.
    border_width : int, optional
        Border thickness in pixels.
    """
    rows, cols = roi
    top, bottom = rows.start, rows.stop
    left, right = cols.start, cols.stop
    edges = (
        np.s_[top:top + border_width, left:right],          # top edge
        np.s_[bottom - border_width:bottom, left:right],    # bottom edge
        np.s_[top:bottom, left:left + border_width],        # left edge
        np.s_[top:bottom, right - border_width:right],      # right edge
    )
    for edge in edges:
        img[edge] = border_value
x1, y1 = 100, 50
w1, h1 = 200, 125
roi_1 = np.s_[y1:y1+h1, x1:x1+w1]
x2, y2 = 600, 400
w2, h2 = 300, 250
roi_2 = np.s_[y2:y2+h2, x2:x2+w2]
temp_np_img = np_img.copy()
roi_border(temp_np_img, roi_1, border_width=5)
roi_border(temp_np_img, roi_2, border_value=(255, 15, 255), border_width=3)
fig, ax = plt.subplots(figsize=(8,6))
ax.imshow(temp_np_img, interpolation=None, vmin=0, vmax=255);
```
# Using ipywidgets, `interact`, and custom color map with an image
## Imports
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
%matplotlib inline
from PIL import Image
from ipywidgets import interact, fixed
import ipywidgets as widgets
```
## Specific functions to generate image
```
# See https://en.wikipedia.org/wiki/Gaussian_beam
def beam_waist(z, w0, wavelength_um):
    """
    Return the Gaussian-beam radius w(z) at axial distance ``z``.

    Parameters
    ----------
    z : float or array
        Axial distance from the focal plane (same length units as w0).
    w0 : float
        Beam-waist radius at the focus.
    wavelength_um : float
        Wavelength in microns.
    """
    # Rayleigh range: axial distance over which the beam area doubles.
    rayleigh_range = np.pi * w0 ** 2 / wavelength_um
    normalized_z = z / rayleigh_range
    return w0 * np.sqrt(1 + normalized_z ** 2)
def gaussian_beam(x, z, w0, wavelength_um):
    """
    Normalized intensity of a Gaussian beam at transverse offset ``x``
    and axial distance ``z`` from the focus (peak intensity at the
    focus is 1).
    """
    # Beam radius at z (beam-waist formula inlined).
    rayleigh_range = np.pi * w0 ** 2 / wavelength_um
    w_z = w0 * np.sqrt(1 + (z / rayleigh_range) ** 2)
    # On-axis intensity falls as (w0/w)^2; the transverse profile
    # is the usual exp(-2 x^2 / w^2) Gaussian.
    return (w0 / w_z) ** 2 * np.exp(-2 * x ** 2 / w_z ** 2)
def absorption(z, ha):
    """
    Beer–Lambert attenuation factor exp(-z/ha) at depth ``z``.

    Parameters
    ----------
    z : float
        Depth into the absorbing medium; must be non-negative.
    ha : float
        Characteristic absorption depth (1/e penetration depth).

    Raises
    ------
    ValueError
        If ``z`` is negative.
    """
    # Raise instead of `assert`: asserts are stripped under `python -O`,
    # which would silently allow negative depths.
    if z < 0.0:
        raise ValueError("depth z must be non-negative, got %r" % (z,))
    return np.exp(-z/ha)
```
## Custom color map from a scaling function
```
def scaled_intensity(intensity):
    """
    Map normalized intensities in [0, 1] onto a two-decade log scale
    in [0, 1]: 1.0 -> 1.0, 0.01 -> 0.0, and anything below 0.01
    (including exact zeros) clamps to 0.
    """
    assert np.amax(intensity) <= 1.0
    assert np.amin(intensity) >= 0.0
    # Replace exact zeros so log10 is defined; 1e-6 maps well below
    # the visible two-decade window.
    safe = np.where(intensity == 0, 1.e-6, intensity)
    scaled = 1.0 + np.log10(safe) / 2
    # Clamp everything below the window to black.
    scaled[scaled < 0] = 0
    return scaled
def make_scaled_colormap(scale_function):
    """
    Build a grayscale colormap whose gray level follows ``scale_function``.

    Parameters
    ----------
    scale_function : callable
        Maps an array of values in [0, 1] to scaled gray levels in [0, 1].

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap
    """
    # See ImportanceOfBeingErnest's answer at https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale
    cvals = np.linspace(0, 1)
    # Bug fix: use the scale_function argument instead of the hard-coded
    # global scaled_intensity, so alternative scalings can be supplied.
    color_values = scale_function(cvals)
    colors = [(c, c, c) for c in color_values]
    tuples = list(zip(cvals, colors))
    return matplotlib.colors.LinearSegmentedColormap.from_list("", tuples)
cmap_scaled = make_scaled_colormap(scaled_intensity)
```
## Function to use with `interact`
```
def plot_gaussian_beam(pixel_um=7.6,
                       wavelength_um=0.365,
                       ha=12.0,
                       aspect_equal=True,
                       attenuate_in_resin=False,
                       cmap_name='linear',
                       ):
    """Render the intensity map of a focused Gaussian beam, optionally
    attenuated by resin absorption for z >= 0.

    Parameters
    ----------
    pixel_um : float
        Pixel size in microns; the minimum beam waist is taken as half of it.
    wavelength_um : float
        Beam wavelength in microns.
    ha : float
        Characteristic absorption depth of the resin in microns.
    aspect_equal : bool
        If True, plot with equal x/z scaling; otherwise auto aspect.
    attenuate_in_resin : bool
        If True, apply exponential absorption below the resin surface (z >= 0).
    cmap_name : {'linear', 'scaled'}
        'linear' uses matplotlib's gray map; 'scaled' uses the module-level
        ``cmap_scaled`` log-scaled colormap.

    Raises
    ------
    ValueError
        If ``cmap_name`` is not one of the recognized names.
    """
    min_beam_waist_um = pixel_um/2
    aspect = 'equal' if aspect_equal else 'auto'
    # Image grid: 1001 rows (z axis) by 101 columns (x axis), one micron each.
    gaussian_image = np.zeros((1001,101))
    gaussian_image.shape  # NOTE(review): no-op expression, leftover from notebook exploration
    # Pixel indices of the focal point (image center).
    center = (int(gaussian_image.shape[0]/2), int(gaussian_image.shape[1]/2))
    # print(center)
    # Create Gaussian beam image: one transverse profile per z row.
    x = np.arange(-center[1], center[1] + 1)
    for z_index in range(0, gaussian_image.shape[0]):
        z = z_index - center[0]
        # print(z_index, z)
        gaussian_image[z+center[0], :] = gaussian_beam(x, z, min_beam_waist_um, wavelength_um)
    if attenuate_in_resin:
        # Gaussian beam attenuated by resin absorption for rows at/below the
        # resin surface (z >= 0, i.e. z_index >= center[0]).
        attenuated_gaussian = np.copy(gaussian_image)
        for z_index in range(center[0], attenuated_gaussian.shape[0]):
            z = z_index - center[0]
            attenuated_gaussian[z_index, :] *= absorption(z, ha)
    else:
        # No attenuation requested; still copy so the overlays below do not
        # modify the original beam image.
        attenuated_gaussian = np.copy(gaussian_image)
    # Show features of interest
    # Indicate start of resin region (full-width bright line at z = 0).
    attenuated_gaussian[center[0], :] = 1.0
    # Indicate ha depth into resin region (half-intensity line).
    attenuated_gaussian[int(center[0] + ha), :] = 0.5
    if cmap_name == 'linear':
        cmap = 'gray'
    elif cmap_name == 'scaled':
        # Module-level log-scaled colormap built by make_scaled_colormap.
        cmap = cmap_scaled
    else:
        raise ValueError(f"Color map {cmap_name} is undefined")
    fig, ax = plt.subplots(figsize=(12,12))
    # NOTE(review): interpolation=None means "matplotlib default", not the
    # string 'none' (no interpolation) — confirm which was intended.
    im = ax.imshow(attenuated_gaussian, cmap=cmap, vmin=0, vmax=1, aspect=aspect, interpolation=None,
                   origin='lower', extent=[-center[1], center[1], -center[0], center[0]]);
    ax.set_xlabel("x (microns)")
    ax.set_ylabel("z (microns)")
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.ax.set_ylabel('Normalized Intensity', rotation=-90, va="bottom")
```
## Use with `interact`
```
interact(plot_gaussian_beam,
pixel_um=fixed(7.6),
wavelength_um=fixed(0.365),
ha=fixed(12.0),
aspect_equal=True,
attenuate_in_resin=False,
cmap_name=widgets.RadioButtons(
options=['linear', 'scaled'],
value='linear',
description='Color map',
disabled=False
)
);
```
| github_jupyter |
# Point sources #
In astromodels, a point source is described by its position in the sky
and its spectral features.
## Creating a point source
A simple source with a power law spectrum can be created like this, using J2000 R.A. and Dec (ICRS), which is the default coordinate system:
```
from astromodels import *
simple_source_icrs = PointSource('simple_source', ra=123.2, dec=-13.2, spectral_shape=Powerlaw())
```
We can also use Galactic coordinates:
```
simple_source_gal = PointSource('simple_source', l=234.320573, b=11.365142, spectral_shape=Powerlaw())
```
As spectral shape we can use any function or any composite function (see
"Creating and modifying functions")
## Getting info about a point source
Info about a point source can be obtained with the
`.display()` method (which will use the richest representation available),
or by printing it which will display a text-only representation:
```
simple_source_icrs.display()
print(simple_source_icrs)
```
As you can see we have created a point source with one component automatically named "main", with a power law spectrum, at the
specified position.
## Converting between coordinate systems
By default the coordinates of the point source are displayed in the same
system used during creation. However, you can always obtain R.A, Dec or
L,B like this:
```
simple_source_icrs.position.display()
l = simple_source_icrs.position.get_l()
b = simple_source_icrs.position.get_b()
print(l,b)
simple_source_gal.position.display()
ra = simple_source_gal.position.get_ra()
dec = simple_source_gal.position.get_dec()
print(ra,dec)
```
For more control on the output and many more options, such as transform
to local frames or other equinoxes, you can obtain an instance of
[astropy.coordinates.SkyCoord](http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html) by using the `sky_coord` property of the
position object:
```
sky_coord_instance = simple_source_icrs.position.sky_coord
ra = sky_coord_instance.transform_to('icrs').ra
dec = sky_coord_instance.transform_to('icrs').dec
print(ra.deg)
```
## Gotcha while accessing coordinates
Please note that using `get_ra()` and `.ra` (or the equivalent methods for
the other coordinates) is not the same. While `get_ra()` will always
return a single float value corresponding to the R.A. of the source, the
`.ra` property will exist only if the source has been created using R.A,
Dec as input coordinates and will return a Parameter instance:
```
parameter_ra = simple_source_icrs.position.ra
parameter_dec = simple_source_icrs.position.dec
print( type(parameter_ra) )
parameter_ra.display()
parameter_dec.display()
```
The following would instead throw `AttributeError`, since `simple_source_icrs` was instanced using R.A. and Dec. and hence does not have the `l`, `b` parameters:
```
try:
print( simple_source_icrs.position.l)
except Exception as e:
print(e)
```
In all cases, independently on how the source was instanced, you can obtain the *values* of coordinates in degrees
as floating point numbers using `get_ra()`, `get_dec()`, `get_l()`, `get_b()`. However, you can only directly *assign* coordinates in the same system that the source direction was originally created, e.g.:
```
simple_source_icrs.position.display()
simple_source_icrs.position.ra = simple_source_icrs.position.ra.value + 1.0
simple_source_icrs.position.dec = simple_source_icrs.position.dec.value - 1.0
simple_source_icrs.position.display()
```
## Fitting the source position
Source coordinates, like any parameters, can be set to be free or fixed during the fit. By default, coordinates are set to be fixed. If you would like to fit them as free parameters during the likelihood fit, they can be freed as any other parameter. Note that `param.free = True` and `param.fix = False` are equivalent.
```
print("Free parameters (before freeing position):", simple_source_icrs.free_parameters.keys())
simple_source_icrs.position.ra.free = True
simple_source_icrs.position.dec.fix = False
print("Free parameters (after freeing position):", simple_source_icrs.free_parameters.keys())
```
For a source created in Galactic coordinates, instead use the following:
```
print("Free parameters (before freeing position):", simple_source_gal.free_parameters.keys())
simple_source_gal.position.l.free = True
simple_source_gal.position.b.fix = False
print("Free parameters (after freeing position):", simple_source_gal.free_parameters.keys())
```
By default, the allowed range for the Right Ascension is from 0˚ to 360˚ and allowed declination values range from -90˚ to 90˚. **If fitting the source position, it is strongly recommended to restrict the coordinates to be inside the region of interest (ROI) at all times.** The source moving far enough from the ROI during the fit can lead to issues such as the minimizer getting "stuck" due to the likelihood surface being flat. For example:
```
simple_source_icrs.position.ra.bounds = ( simple_source_icrs.position.ra.value - 5.0, simple_source_icrs.position.ra.value + 5.0 )
simple_source_icrs.position.dec.bounds = ( simple_source_icrs.position.dec.value - 5.0, simple_source_icrs.position.dec.value + 5.0 )
simple_source_icrs.position.ra.display()
simple_source_icrs.position.dec.display()
```
## "Calling" a point source
Both the point source object itself as well as the components are callable functions that take as argument(s) an array of energies and return the differential flux dN/dE at those energies. Energies can be provided with or without units. **If no units are provided, energies are assumed to be in keV and fluxes are returned in 1/(cm2 keV s).**
```
from astropy import units as u
E = [1, 10, 100]*u.keV
print("Energy in keV:")
print( "With units:", simple_source_icrs(E) )
print( "Without units:", simple_source_icrs(E.value) )
print( "With units:", simple_source_icrs.spectrum.main.shape(E) )
print( "Without units:", simple_source_icrs.spectrum.main.shape(E.value) )
print("")
print("Energy in TeV:")
E_TeV = E.to(u.TeV)
print( "With units:", simple_source_icrs(E_TeV) )
print( "With units:", simple_source_icrs(E_TeV).to(1/u.cm**2/u.TeV/u.s) )
print( "Without units:", simple_source_icrs(E_TeV.value) )
```
| github_jupyter |
# ML Pipeline Preparation
Follow the instructions below to help you create your ML pipeline.
### 1. Import libraries and load data from database.
- Import Python libraries
- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
- Define feature and target variables X and Y
```
# import libraries
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import pandas as pd
pd.set_option('display.max_columns', 100)
import os
import re
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sqlalchemy import create_engine
# load data from database
engine = create_engine('sqlite:///DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse_table', engine)
# display loaded dataframe
df
# have a quick overview on the dataset
df.describe()
# a quick check on the max value for each columns
df.max()
# a quick check on the min value for each columns
df.min()
# we remove 'child_alone' column as it has only 0 (ZERO) values.
df = df.drop('child_alone', axis = 1)
# maximum value for 'related' column is '2'. Let us investigate further...
df.groupby('related').count()
```
### Note:
From the results above, '2' could be an error. We can replace it with '0' or '1'. In this case, I will replace it with '1'
```
df['related'] = df['related'].map(lambda x: 1 if x==2 else x)
# check if all '2' have been replace with '1'
df.groupby('related').count()
# separate the dataset to input variables (X) and target variables (y)
X = df['message']
y = df.iloc[:,4:]
```
### 2. Write a tokenization function to process your text data
```
def tokenize(text):
    """
    Normalize a message into lower-cased, lemmatized word tokens.

    URLs are first replaced by the literal token "urlplaceholder" so that
    individual URLs do not pollute the vocabulary.
    """
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for found_url in re.findall(url_regex, text):
        text = text.replace(found_url, "urlplaceholder")
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(raw_token).lower().strip()
            for raw_token in word_tokenize(text)]
```
### Implement the StartingVerbExtractor class
Create custom transformer to be used for FeatureUnion later in the ML pipeline.
```
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """Binary text feature: does any sentence start with a verb (or 'RT')?

    Intended for use inside a sklearn FeatureUnion alongside TF-IDF features.
    """

    def starting_verb(self, text):
        """Return True if any sentence of ``text`` opens with a verb
        (POS tag VB/VBP) or with the retweet marker 'RT'."""
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            # Bug fix: a sentence can tokenize to an empty list (e.g. a
            # punctuation-only sentence), in which case pos_tags[0] would
            # raise IndexError.
            if not pos_tags:
                continue
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                return True
        return False

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X):
        """Apply starting_verb to each document; return a one-column DataFrame."""
        X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)
```
### 3. Build a machine learning pipeline
This machine learning pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
```
def ML_pipeline(clf = AdaBoostClassifier()):
    """
    Build a multi-output text-classification pipeline:
    bag-of-words -> TF-IDF -> MultiOutputClassifier(clf).

    Parameters
    ----------
    clf : sklearn estimator, optional
        Base classifier wrapped by MultiOutputClassifier (AdaBoost by default).
    """
    text_processing = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer())
    ])
    steps = [
        ('features', FeatureUnion([('text_pipeline', text_processing)])),
        ('clf', MultiOutputClassifier(clf))
    ]
    return Pipeline(steps)
# include StartingVerbExtractor custom transformer
def ML_pipeline_2(clf = AdaBoostClassifier()):
    """
    Like ML_pipeline, but the FeatureUnion additionally includes the
    custom StartingVerbExtractor feature alongside the TF-IDF features.

    Parameters
    ----------
    clf : sklearn estimator, optional
        Base classifier wrapped by MultiOutputClassifier (AdaBoost by default).
    """
    text_processing = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer())
    ])
    combined_features = FeatureUnion([
        ('text_pipeline', text_processing),
        ('starting_verb', StartingVerbExtractor())
    ])
    return Pipeline([
        ('features', combined_features),
        ('clf', MultiOutputClassifier(clf))
    ])
```
### 4. Train pipeline
- Split data into train and test sets
- Train pipeline
```
X_train, X_test, y_train, y_test = train_test_split(X, y)
model = ML_pipeline()
model.fit(X_train, y_train)
```
### 5. Test your model
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
```
y_pred_test = model.predict(X_test)
# classification report on test data
print(classification_report(y_test.values, y_pred_test, target_names=y.columns.values))
```
### 6. Improve your model
Use grid search to find better parameters.
```
# get list of model parameters
model.get_params().keys()
model_2 = ML_pipeline()
# Model training using GridSearchCV is computational extensive task.
# Computational time increases as we increase the number of parameters.
# In view of that, only a few parameters are chosen to demonstrate the concept.
# To obtain a highly optimized model, we need to increase the number of parameters.
parameters = {
'clf__estimator__learning_rate': [0.5, 1.0],
'clf__estimator__n_estimators': [10, 20]
}
cv = GridSearchCV(model_2, param_grid=parameters, cv=5, n_jobs=-1, verbose=3)
# verbose=3 to get real time training progress
# n_jobs=-1 -> to train in parallel across the maximum number of cores in our computer, spending it up.
# cv=5 -> 5-fold cross validation
cv.fit(X_train, y_train)
```
### 7. Test your model
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
```
y_pred_test = cv.predict(X_test)
# classification report on test data
print(classification_report(y_test.values, y_pred_test, target_names=y.columns.values))
```
### 8. Try improving your model further. Here are a few ideas:
* try other machine learning algorithms
* add other features besides the TF-IDF
```
# try other model -> RandomForestClassifier
rf_model = ML_pipeline(clf = RandomForestClassifier())
rf_model.fit(X_train, y_train)
y_pred_rf_test = rf_model.predict(X_test)
print(classification_report(y_test.values, y_pred_rf_test, target_names=y.columns.values))
# we can also try using ML_pipeline_2 that includes custom transformer of 'StartingVerbEstimator'
model_3 = ML_pipeline_2()
model_3.fit(X_train, y_train)
y_pred_3_test = model_3.predict(X_test)
print(classification_report(y_test.values, y_pred_3_test, target_names=y.columns.values))
```
### 9. Export your model as a pickle file
```
# save model in pickle file
with open('classifier.pkl', 'wb') as f:
pickle.dump(model_3, f)
```
### 10. Use this notebook to complete `train.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import pylab as pl
data = pd.read_csv('data/HR_comma_sep.csv')
data.head()
data.columns
x = data[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
x.head()
x['salary'] = x.salary.map({'low' : 0, 'medium' : 1, 'high' : 2})
x['salary']
x.head()
y = data['left']
y.head()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 4)
x_train.shape
x_test.shape
y_train.shape
y_test.shape
from sklearn.linear_model import LogisticRegression
regr = LogisticRegression(solver = 'lbfgs', C = 0.001)
regr.fit(x_train, y_train)
y_test.head()
y_pred = regr.predict(x_test)
y_pred[0:5]
y_pred_proba = regr.predict_proba(x_test)
y_pred_proba
a_1 = regr.score(x_train, y_train)
a_2 = regr.score(x_test, y_test)
a_3 = regr.score(x_test, y_pred)
from sklearn.metrics import jaccard_similarity_score, accuracy_score, log_loss, f1_score, classification_report, confusion_matrix
a_4 = jaccard_similarity_score(y_test, y_pred)
a_5 = accuracy_score(y_test, y_pred)
a_6 = log_loss(y_test, y_pred)
a_7 = f1_score(y_test, y_pred)
print(classification_report(y_test, y_pred))
from sklearn import neighbors
knn = neighbors.KNeighborsClassifier(n_neighbors = 1)
knn.fit(x_train, y_train)
b_1 = knn.score(x_train, y_train)
b_2 = knn.score(x_test, y_test)
y_test.head()
yhat = knn.predict(x_test)
yhat[0:5]
b_3 = knn.score(x_test, y_pred)
b_4 = jaccard_similarity_score(y_test, yhat)
b_5 = accuracy_score(y_test, yhat)
print(classification_report(y_test, yhat))
b_6 = log_loss(y_test, yhat)
b_7 = f1_score(y_test, yhat)
from sklearn import svm
clf = svm.SVC(kernel = 'rbf', gamma = 'auto')
clf.fit(x_train, y_train)
y_test.head()
y_pred_1 = clf.predict(x_test)
y_pred_1[0:5]
c_1 = clf.score(x_train, y_train)
c_2 = clf.score(x_test, y_test)
c_3 = clf.score(x_test, y_pred_1)
c_4 = jaccard_similarity_score(y_test, y_pred_1)
c_5 = accuracy_score(y_test, y_pred_1)
c_7 = f1_score(y_test, y_pred_1)
print(classification_report(y_test, y_pred_1))
c_6 = log_loss(y_test, y_pred_1)
from sklearn import tree
clf_1 = tree.DecisionTreeClassifier()
clf_1.fit(x_train, y_train)
y_test.head()
yhat_1 = clf_1.predict(x_test)
yhat_1[0:5]
d_1 = clf_1.score(x_train, y_train)
d_2 = clf_1.score(x_test, y_test)
d_3 = clf_1.score(x_test, y_pred)
print(classification_report(y_test, yhat_1))
d_7 = f1_score(y_test, yhat_1)
d_6 = log_loss(y_test, yhat_1)
d_5 = accuracy_score(y_test, yhat_1)
d_4 = jaccard_similarity_score(y_test, yhat_1)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators = 1000)
rf.fit(x_train, y_train)
y_test[0:5]
y_pred_2 = rf.predict(x_test)
y_pred_2[0:5]
e_1 = rf.score(x_train, y_train)
e_1
e_2 = rf.score(x_test, y_test)
e_2
e_3 = rf.score(x_test, y_pred_2)
e_3
e_4 = jaccard_similarity_score(y_test, y_pred_2)
e_4
e_5 = accuracy_score(y_test, y_pred_2)
e_5
e_6 = log_loss(y_test, y_pred_2)
e_6
e_7 = f1_score(y_test, y_pred_2)
e_7
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
gsn = GaussianNB()
gsn.fit(x_train, y_train)
y_test[0:5]
yhat_2 = gsn.predict(x_test)
yhat_2[0:5]
f_1 = gsn.score(x_train, y_train)
f_1
f_2 = gsn.score(x_test, y_test)
f_2
f_3 = gsn.score(x_test, yhat_2)
f_3
f_4 = jaccard_similarity_score(y_test, yhat_2)
f_4
f_5 = accuracy_score(y_test, yhat_2)
f_5
f_6 = log_loss(y_test, yhat_2)
f_6
f_7 = f1_score(y_test, yhat_2)
f_7
mul = MultinomialNB()
mul.fit(x_train, y_train)
y_test[0:5]
y_pred_3 = mul.predict(x_test)
y_pred_3[0:5]
g_1 = mul.score(x_train, y_train)
g_1
g_2 = mul.score(x_test, y_test)
g_2
g_3 = mul.score(x_test, y_pred_3)
g_3
g_4 = jaccard_similarity_score(y_test, y_pred_3)
g_4
g_5 = accuracy_score(y_test, y_pred_3)
g_5
g_6 = log_loss(y_test, y_pred_3)
g_6
g_7 = f1_score(y_test, y_pred_3)
g_7
ber = BernoulliNB()
ber.fit(x_train, y_train)
y_test[0:5]
yhat_3 = ber.predict(x_test)
yhat_3[0:5]
h_1 = ber.score(x_train, y_train)
h_1
h_2 = ber.score(x_test, y_test)
h_2
h_3 = ber.score(x_test, yhat_3)
h_3
h_4 = jaccard_similarity_score(y_test, yhat_3)
h_4
h_5 = accuracy_score(y_test, yhat_3)
h_5
h_6 = log_loss(y_test, yhat_3)
h_6
h_7 = f1_score(y_test, yhat_3)
h_7
df = pd.DataFrame({'Train_score' : [a_1, b_1, c_1, d_1, e_1, f_1, g_1, h_1],
'Test_Score' : [a_2, b_2, c_2, d_2, e_2, f_2, g_2, h_2],
'Predicted_Score' : [a_3, b_3, c_3, d_3, e_3, f_3, g_3, h_3],
'Jaccard_Similarity_Score' : [a_4, b_4, c_4, d_4, e_4, f_4, g_4, h_4],
'accuracy_score' : [a_5, b_5, c_5, d_5, e_5, f_5, g_5, h_5],
'log_loss' : [a_6, b_6, c_6, d_6, e_6, f_6, g_6, h_6],
'f1_score' : [a_7, b_7, c_7, d_7, e_7, f_7, g_7, h_7]}, index = ['Logistic_Regression', 'KNN', 'SVM', 'Decision_Tree', 'Random_Forest', 'GaussianNB', 'MultinomialNB', 'BernoulliNB'])
df
```
| github_jupyter |
```
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
import json
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
## allow the import of the MovieData class
import sys
import os
path = os.path.abspath(os.getcwd()) + '/../data_load'
sys.path.insert(0, path)
from movie_data import MovieData
%matplotlib inline
moviedata = MovieData(min_genre_frequency=0.1)
print(moviedata.genre_labels)
def print_top_words(model, feature_names, n_top_words):
    """
    Print, for each topic of a fitted decomposition model, the
    ``n_top_words`` highest-weighted feature names (space-separated,
    in descending weight order).
    """
    for topic_index, weights in enumerate(model.components_):
        # Indices of the largest weights, highest first.
        top_indices = weights.argsort()[::-1][:n_top_words]
        print("Topic #%d:" % topic_index)
        print(" ".join(feature_names[i] for i in top_indices))
        print()
```
# LDA
```
n_features = 5000
n_top_words = 20
count_vectorizor = CountVectorizer(
max_df=0.95,
min_df=2,
max_features=n_features,
stop_words='english'
)
count = count_vectorizor.fit_transform(moviedata.plots)
count_feature_names = count_vectorizor.get_feature_names()
lda = LatentDirichletAllocation(
n_topics=8, max_iter=15,
learning_method='online',
learning_offset=50.,
random_state=0
)
Z = lda.fit_transform(count)
print_top_words(lda, count_feature_names, n_top_words)
```
# TF-IDF
```
vectorizer = TfidfVectorizer(
max_df=0.7,
analyzer='word',
ngram_range=(1, 1),
max_features=n_features,
stop_words='english')
X = vectorizer.fit_transform(moviedata.plots)
# vectorizer.get_feature_names()
pca = PCA(n_components=200)
XX = pca.fit_transform(X.todense())
```
## Combined
```
combined_features = np.hstack([Z, XX])
X_train, X_test, y_train, y_test = train_test_split(
combined_features,
moviedata.one_hot_genres,
test_size=0.4,
random_state=42
)
```
# Linear Regression
```
genre_coeffs = []
scores = []
for i, label in enumerate(moviedata.genre_labels):
coeffs = np.linalg.lstsq(X_train, y_train[:, i])[0]
genre_coeffs.append(coeffs)
y_pred_probs = np.dot(X_test, coeffs)
y_pred = np.array(y_pred_probs > 0.25, dtype=int)
score = f1_score(y_test[:, i], y_pred)
scores.append(score)
print("Genre: {}, Score: {:.2f}".format(label, score))
print('Mean f1 score: {:.3f}'.format(np.mean(scores)))
```
# Logistic Regression
```
scores = []
for i, label in enumerate(moviedata.genre_labels):
clf = LogisticRegression()
clf.fit(X_train, y_train[:, i])
y_pred_probs = clf.predict_proba(X_test)[:, 1]
plt.hist(y_pred_probs)
y_pred = np.array(y_pred_probs > np.mean(y_pred_probs), dtype=int)
score = f1_score(y_test[:, i], y_pred)
scores.append(score)
print("Genre: {}, Score: {:.2f}".format(label, score))
print('Mean f1 score: {:.3f}'.format(np.mean(scores)))
```
# Random Forrest
```
clf = RandomForestClassifier(n_estimators=25, max_features='auto', max_depth=None)
clf.fit(X_train, y_train)
y_guess_probs = clf.predict_proba(X_test)
y_guess = clf.predict(X_test)
scores = f1_score(y_test, y_guess, average=None)
print(scores)
print(np.mean(scores))
# guess randomly
np.mean(f1_score(y_test, np.random.randint(0, 2, y_test.shape), average=None))
y_train[:, 0]
```
| github_jupyter |
## Amino acid titration explorer
## Instructions
* **<font color="red">Go to "Cell->Run All" to start the program running. After that point, you should be able to use the sliders and buttons to manipulate the output.</font>**
* If things go totally awry, you can go to "Kernel->Restart" and then "Cell->Run All". A more drastic solution would be to close and reload the page, which will reset the code to its initial state.
* If you're interested in programming, click the "Toggle raw code" button. This will expose the underlying program, written in the Python 3 programming language. You can edit the code to your heart's content: just go to "Cell->Run All" after you modify things so the changes will be incorporated. Text in the code blocks preceded by `#` are comments to guide you through the exercise and/or explain the code
```
# -----------------------------------------------------------------------------------
# Javascript that gives us a cool hide-the-code button
from IPython.display import HTML
HTML('''
<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()">
<input type="submit" value="Toggle raw code">
</form>
''')
# ------------------------------------------------------------------------------------
#Import libraries that do things like plot data and handle arrays
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
# libraries for making pretty sliders
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
def fractional_protonation(pKa, pH):
    """
    Calculate the fraction of a titratable group that is protonated at a
    given pH, from its pKa (Henderson-Hasselbalch: theta = 1/(1 + 10^(pH-pKa))).
    """
    # Ratio [A]/[HA] from rearranged Henderson-Hasselbalch.
    deprotonation_ratio = 10**(pH - pKa)
    return 1 / (1 + deprotonation_ratio)
def fractional_charge(pKa, charge_when_protonated, pH):
    """
    Calculate the fractional charge on a titratable group at a given pH,
    from its pKa and its charge in the protonated state.

    Groups that are neutral when protonated (e.g. carboxylic acids)
    contribute a negative charge when deprotonated; groups that are +1
    when protonated (e.g. amines) contribute their protonated fraction.
    """
    protonated_fraction = 1 / (1 + 10**(pH - pKa))
    if charge_when_protonated == 0:
        # Neutral when protonated -> charge comes from the deprotonated form.
        return -1 * (1 - protonated_fraction)
    # Charged when protonated -> charge tracks the protonated fraction.
    return protonated_fraction
def titrate_amino_acid(sidechain_pKa=4,charge_when_protonated=0,titratable_sidechain=True):
    """
    Calculate the total charge on a free amino acid as a function of pH
    and plot both total protonation and total charge versus pH.

    Parameters
    ----------
    sidechain_pKa : float, optional
        pKa of the side chain (ignored if titratable_sidechain is False).
    charge_when_protonated : int, optional
        Charge of the protonated side chain (0 or 1).
    titratable_sidechain : bool, optional
        Whether to include the side chain as a titratable group in
        addition to the N- and C-termini.
    """
    # N- and C-terminal groups: amine (pKa 9.0, +1 protonated) and
    # carboxyl (pKa 3.5, neutral protonated).
    pKas = [9.0,3.5]
    charges = [1,0]
    # Are we adding a titratable side chain?
    if titratable_sidechain == True:
        pKas.append(sidechain_pKa)
        charges.append(charge_when_protonated)
    # Create a vector of pH values and vectors of zeros to accumulate
    # total charge and total protonation vs. pH.
    pH_list = np.arange(0,14,0.25)
    total_charge = np.zeros(len(pH_list))
    total_protonation = np.zeros(len(pH_list))
    # For every titratable group, add its charge and protonation
    # contributions over the whole pH range (vectorized over pH_list).
    for i in range(len(pKas)):
        total_charge = total_charge + fractional_charge(pKas[i],charges[i],pH_list)
        total_protonation = total_protonation + fractional_protonation(pKas[i],pH_list)
    # Two side-by-side panels: protonation state (left), charge state (right).
    fig, ax = plt.subplots(1,2)
    ax[0].plot(pH_list,total_protonation,color="black")
    ax[0].axhline(y=0,color="gray",linestyle="dashed")
    ax[0].set_xlabel("pH")
    ax[0].set_ylabel("total protonation")
    ax[0].set_title("protonation state")
    ax[1].plot(pH_list,total_charge,color="green")
    ax[1].axhline(y=0,color="gray",linestyle="dashed")
    ax[1].set_xlabel("pH")
    ax[1].set_ylabel("total charge")
    ax[1].set_title("charge state")
    fig.set_figwidth(10)
    fig.tight_layout()
    plt.show()
titratable_sc_widget = widgets.Checkbox(description="amino acid sidechain titrable?",value=True)
pKa_widget = widgets.FloatText(description="pKa of sidechain",value=4.5)
charge_widget = widgets.IntSlider(description="charge of protonated sidechain",min=0,max=1,step=1,value=0)
container = widgets.interactive(titrate_amino_acid,
titratable_sidechain=titratable_sc_widget,
sidechain_pKa=pKa_widget,
charge_when_protonated=charge_widget)
display(container)
```
# Appendix: the Henderson-Hasselbalch Equation and Fractional Charge
## Derive HH:
Start with the definition of an acid dissocation constant:
$$\frac{[H^{+}][A]}{[HA]}=K_{acid}$$
Rearrange and take the $-log_{10}$ of both sides:
$$[H^{+}]=\frac{K_{acid}[HA]}{[A]}$$
$$-log_{10}([H^{+}]) = -log_{10}\Big(\frac{K_{acid}[HA]}{[A]}\Big)$$
Apply the log rule that $log(XY) = log(X) + log(Y)$:
$$-log_{10}([H^{+}]) = -log_{10}(K_{acid}) -log_{10}\Big(\frac{[HA]}{[A]}\Big)$$
Recalling that $pX \equiv -log_{10}(X)$ we can write:
$$pH = pK_{a} - log_{10} \Big (\frac{[HA]}{[A]} \Big)$$
Then apply the log rule that $ -log(X) = log(1/X)$ to get:
$$pH = pK_{a} + log_{10} \Big (\frac{[A]}{[HA]} \Big)$$
This is the Henderson-Hasselbalch equation.
## Derive fractional protonation
Now let's think about $\theta$, the fraction of some molecule $A$ that is protonated as a function of $pH$. This is simply the concentration of protonated molecules ($[HA]$) over all possible molecules:
$$\theta \equiv \frac{[HA]}{[HA] + [A]}$$
We can rearrange Henderson-Hasselbalch to solve for $[A]$:
$$pH - pK_{a} = log_{10} \Big (\frac{[A]}{[HA]} \Big)$$
$$10^{(pH-pK_{a})} = \frac{[A]}{[HA]}$$
$$[HA] 10^{(pH-pK_{a})} = [A]$$
And then substitute into the equation for $\theta$:
$$\theta = \frac{[HA]}{[HA] + [HA] 10^{(pH-pK_{a})}}$$
$$\theta = \frac{1}{1 + 10^{(pH-pK_{a})}}$$
We now have an equation that relates the $pK_{a}$ and $pH$ to the fractional protonation of a molecule.
## Relate fractional protonation to fractional charge
To relate fractional protonation to the fractional charge, we need to know some chemistry.
For example, a protonated carboxylic acid ($R-COOH$) is neutral, while a protonated amine ($NH^{+}_{4}$) is charged. If you know the chemical structures of your amino acids, you should be able to reason about charge vs. pH given information about _protonation_ vs. pH. The titration behaviors of the groups that titrate at reasonable pH values are shown below:
**Charge on protonated state = 0**
Aspartic acid/glutamic acid/C-terminus ($pK_{a} \approx 2-4$): $R-COOH \rightleftharpoons \color{red}{R-COO^{-}} + \color{blue}{H^{+}}$
Tyrosine ($pK_{a} = 10.5 $): $R-OH \rightleftharpoons \color{red}{R-O^{-}} + \color{blue}{H^{+}}$
Cysteine ($pK_{a} = 8.4 $): $R-SH \rightleftharpoons \color{red}{R-S^{-}} + \color{blue}{H^{+}}$
**Charge on protonated state = 1**
Lysine/N-terminus ($pK_{a} \approx 10 $): $\color{blue}{R-NH^{+}_{3}} \rightleftharpoons R-NH_{2} + \color{blue}{H^{+}}$
Histidine ($pK_{a} = 6.0 $): $\color{blue}{R-C_{3}H_{4}N_{2}^{+}} \rightleftharpoons R-C_{3}H_{3}N_{2} + \color{blue}{H^{+}}$
Arginine ($pK_{a} = 12.5 $): $\color{blue}{R-C_{1}H_{5}N_{3}^{+}} \rightleftharpoons R-C_{1}H_{4}N_{3} + \color{blue}{H^{+}}$
| github_jupyter |
# Distributed Training with GPUs on Cloud AI Platform
**Learning Objectives:**
1. Setting up the environment
1. Create a model to train locally
1. Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy
In this notebook, we will walk through using Cloud AI Platform to perform distributed training using the `MirroredStrategy` found within `tf.keras`. This strategy will allow us to use the synchronous AllReduce strategy on a VM with multiple GPUs attached.
Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [Solution Notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/distributed_training.ipynb) for reference.
```
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
```
Next we will configure our environment. Be sure to change the `PROJECT_ID` variable in the below cell to your Project ID. This will be the project to which the Cloud AI Platform resources will be billed. We will also create a bucket for our training artifacts (if it does not already exist).
## Lab Task #1: Setting up the environment
```
import os
# TODO 1
PROJECT_ID = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT_ID
REGION = 'us-central1'
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["BUCKET"] = BUCKET
```
Since we are going to submit our training job to Cloud AI Platform, we need to create our trainer package. We will create the `train` directory for our package and create a blank `__init__.py` file so Python knows that this folder contains a package.
```
!mkdir train
!touch train/__init__.py
```
Next we will create a module containing a function which will create our model. Note that we will be using the Fashion MNIST dataset. Since it's a small dataset, we will simply load it into memory for getting the parameters for our model.
Our model will be a DNN with only dense layers, applying dropout to each hidden layer. We will also use ReLU activation for all hidden layers.
```
%%writefile train/model_definition.py
import tensorflow as tf
import numpy as np
# Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_model():
    """Build the Fashion-MNIST classifier: three ReLU dense layers with
    50% dropout after each, followed by a 10-way softmax output.

    Relies on the module-level `x_train` only for the input shape.
    """
    hidden_units = (1028, 512, 256)
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=x_train.shape[1:]))
    for units in hidden_units:
        model.add(tf.keras.layers.Dense(units))
        model.add(tf.keras.layers.Activation('relu'))
        model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(10))
    model.add(tf.keras.layers.Activation('softmax'))
    return model
```
Before we submit our training jobs to Cloud AI Platform, let's be sure our model runs locally. We will call the `model_definition` function to create our model and use `tf.keras.datasets.fashion_mnist.load_data()` to import the Fashion MNIST dataset.
## Lab Task #2: Create a model to train locally
```
import os
import time
import tensorflow as tf
import numpy as np
from train import model_definition
#Get data
# TODO 2
# TODO -- Your code here.
print("Training time without GPUs locally: {}".format(time.time() - start))
```
## Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy
That took a few minutes to train our model for 20 epochs. Let's see how we can do better using Cloud AI Platform. We will be leveraging the `MultiWorkerMirroredStrategy` supplied in `tf.distribute`. The main difference between this code and the code from the local test is that we need to compile the model within the scope of the strategy. When we do this our training op will use information stored in the `TF_CONFIG` variable to assign ops to the various devices for the AllReduce strategy.
After the training process finishes, we will print out the time spent training. Since it takes a few minutes to spin up the resources being used for training on Cloud AI Platform, and this time can vary, we want a consistent measure of how long training took.
Note: When we train models on Cloud AI Platform, the `TF_CONFIG` variable is automatically set. So we do not need to worry about adjusting based on what cluster configuration we use.
```
%%writefile train/train_mult_worker_mirrored.py
import os
import time
import tensorflow as tf
import numpy as np
from . import model_definition
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
#Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_dataset(X, Y, epochs, batch_size):
    """Wrap (X, Y) arrays in a tf.data pipeline: repeat for `epochs`
    passes and batch with drop_remainder so every batch is full."""
    pairs = tf.data.Dataset.from_tensor_slices((X, Y))
    return pairs.repeat(epochs).batch(batch_size, drop_remainder=True)
ds_train = create_dataset(x_train, y_train, 20, 5000)
ds_test = create_dataset(x_test, y_test, 1, 1000)
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_definition.create_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(
ds_train,
validation_data=ds_test,
verbose=2
)
print("Training time with multiple GPUs: {}".format(time.time() - start))
```
## Lab Task #3: Training with multiple GPUs/CPUs on created model using MultiWorkerMirrored Strategy
First we will train a model without using GPUs to give us a baseline. We will use a consistent format throughout the trials. We will define a `config.yaml` file to contain our cluster configuration and the pass this file in as the value of a command-line argument `--config`.
In our first example, we will use a single `n1-highcpu-16` VM.
```
%%writefile config.yaml
# TODO 3a
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="cpu_only_fashion_minst_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.1 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
```
If we go through the logs, we see that the training job will take around 5-7 minutes to complete. Let's now attach two Nvidia Tesla K80 GPUs and rerun the training job.
```
%%writefile config.yaml
# TODO 3b
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_2gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.1 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
```
That was a lot faster! The training job will take up to 5-10 minutes to complete. Let's keep going and add more GPUs!
```
%%writefile config.yaml
# TODO 3c
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_4gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.1 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
```
The training job will take up to 10 minutes to complete. It was faster than no GPUs, but why was it slower than 2 GPUs? If you rerun this job with 8 GPUs you'll actually see it takes just as long as using no GPUs!
The answer is in our input pipeline. In short, the I/O involved in using more GPUs started to outweigh the benefits of having more available devices. We can try to improve our input pipelines to overcome this (e.g. using caching, adjusting batch size, etc.).
```
```
| github_jupyter |
# Mask R-CNN Demo
A quick intro to using the pre-trained model to detect and segment objects.
```
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
sys.path.append('..')
import mrcnn.coco as coco
import mrcnn.utils as utils
import mrcnn.model as modellib
import mrcnn.visualize as visualize
%matplotlib inline
# Root directory of the project
# ROOT_DIR = os.path.join(os.getcwd(),'..')
# Directory to save logs and trained model
# MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to trained weights file
# Download this file and place in the root of your
# project (See README file for details)
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# Root directory of the project
ROOT_DIR = os.getcwd()
MODEL_PATH = 'E:\Models'
# Directory to save logs and trained model
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_logs")
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
```
## Configurations
We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
```
class InferenceConfig(coco.CocoConfig):
    """Inference-time override of CocoConfig.

    Forces an effective batch size of 1 (GPU_COUNT * IMAGES_PER_GPU) so
    images can be passed to `model.detect` one at a time.
    """
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1       # single GPU (or CPU) at inference
    IMAGES_PER_GPU = 1  # one image per forward pass
config = InferenceConfig()
config.display()
```
## Create Model and Load Trained Weights
```
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
```
## Class Names
The model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
```
# Load COCO dataset
dataset = coco.CocoDataset()
dataset.load_coco(COCO_DIR, "train")
dataset.prepare()
# Print class names
print(dataset.class_names)
```
We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represents its ID (first class is 0, second is 1, third is 2, ...etc.)
```
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
```
## Run Object Detection
```
# Load a random image from the images folder
import mrcnn.visualize as visualize
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
```
| github_jupyter |
### denovo missense prediction w/ feature intersection
* use positions w/ mpc **OR** pathogenic fraction
* calc path freq using counts
* total path freq
* total benign freq
```
import pandas, numpy
from scipy.stats import entropy
import pydot, pydotplus, graphviz
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from sklearn import linear_model, metrics, tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.externals.six import StringIO
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import ExtraTreesClassifier
from IPython.display import HTML
%matplotlib inline
def calc_path_freq(rows):
    """Summarize patient-frequency statistics for the pathogenic variants in `rows`.

    Parameters
    ----------
    rows : pandas.DataFrame
        Needs `clin_class` and `freq` columns.

    Returns
    -------
    tuple
        (total freq, mean freq, entropy of the normalized freq
        distribution, pathogenic variant count); the sentinel
        (0, 0, -1, 0) when no pathogenic variants are present.
    """
    # NOTE: 'LIKLEY_PATHOGENIC' is the label exactly as it appears in the
    # source data (misspelled there) -- do not "fix" the string.
    path = rows[(rows.clin_class == 'PATHOGENIC') |
                (rows.clin_class == 'LIKLEY_PATHOGENIC')]
    count = len(path)
    if count == 0:
        return 0, 0, -1, 0
    # Removed dead code: `neg = sum(df['neg_fam'])` was computed but never used.
    total = path['freq'].sum()
    return total, total / count, entropy(path['freq'] / total), count
def calc_benign_freq(rows):
    """Summarize patient-frequency statistics for the benign variants in `rows`.

    Mirrors calc_path_freq but selects LIKELY_BENIGN / BENIGN rows.

    Returns
    -------
    tuple
        (total freq, mean freq, entropy of the normalized freq
        distribution, benign variant count); the sentinel (0, 0, -1, 0)
        when no benign variants are present.
    """
    ben = rows[(rows.clin_class == 'LIKELY_BENIGN') |
               (rows.clin_class == 'BENIGN')]
    count = len(ben)
    if count == 0:
        return 0, 0, -1, 0
    # Removed dead code: `neg = sum(df['neg_fam'])` was computed but never used.
    total = ben['freq'].sum()
    return total, total / count, entropy(ben['freq'] / total), count
def calc_path_frac(rows):
    """Aggregate pathogenic/benign statistics for one Pfam-domain group.

    Parameters
    ----------
    rows : pandas.DataFrame
        All variants for a single Pfam domain; needs `clin_class` and
        `freq` columns.

    Returns
    -------
    pandas.Series
        path_frac (pathogenic fraction of classified variants, -1 when
        none are classified), group size, pathogenic frequency stats,
        benign frequency stats, and fRatio (pathogenic:benign total
        frequency, -1 when the benign total is zero).
    """
    # Removed dead code: `pfam = list(rows['pfam'].values)[0]` was never used.
    # 'LIKLEY_PATHOGENIC' is the (misspelled) label used in the source data.
    pathogenic = len(rows[(rows.clin_class == 'PATHOGENIC') | (rows.clin_class == 'LIKLEY_PATHOGENIC')])
    benign = len(rows[(rows.clin_class == 'LIKELY_BENIGN') | (rows.clin_class == 'BENIGN')])
    frac = -1
    if pathogenic + benign:
        frac = pathogenic / (pathogenic + benign)
    pf, pf_avg, pf_ent, pcount = calc_path_freq(rows)
    bf, bf_avg, bf_ent, bcount = calc_benign_freq(rows)
    r = -1
    if bf:
        r = pf / bf
    return pandas.Series([frac, len(rows), pf, pf_avg, pf_ent, pcount, bf, bf_avg, bf_ent, bcount, r],
                         index=['path_frac', 'size',
                                'path_freq', 'p_freq_avg', 'p_freq_ent', 'ps',
                                'benign_freq', 'b_freq_avg', 'b_freq_ent', 'bs',
                                'fRatio'])
def calc_tot_freq_ratio(rows):
    """Ratio of total pathogenic to total benign patient frequency.

    BUG FIX: calc_path_freq / calc_benign_freq return 4-tuples
    (total, avg, entropy, count); the previous code divided the tuples
    directly, which raises TypeError. Use the totals (element 0) and
    return -1 when the benign total is zero, mirroring the `fRatio`
    sentinel used in calc_path_frac.
    """
    path_total = calc_path_freq(rows)[0]
    benign_total = calc_benign_freq(rows)[0]
    if not benign_total:
        return -1
    return path_total / benign_total
# Load the annotated GeneDx variant table (tab-separated despite the .xls name).
dat_file = '../data/interim/EPIv6.eff.dbnsfp.anno.hHack.dat.xls'
df_pre = pandas.read_csv(dat_file, sep='\t').fillna(0)
# Per-variant patient frequency: positive families / (positive + negative families).
df_pre.loc[:, 'freq'] = df_pre['pos_fam']/(df_pre['pos_fam']+df_pre['neg_fam'])
# Explode the comma-separated 'pfam' column so each (variant, domain) pair
# becomes its own row, then re-join the remaining variant columns.
df = (df_pre['pfam'].str.split(',', expand=True)
.stack()
.reset_index(level=0)
.set_index('level_0')
.rename(columns={0:'pfam'})
.join(df_pre.drop('pfam',1), how='left')
)
# Per-domain pathogenic/benign summary statistics (see calc_path_frac).
dd = df.groupby('pfam').apply(calc_path_frac)
ff = dd.reset_index()
# mk domain features
def match(row, domain_info):
    """Pick the feature vector for the first matching Pfam domain of `row`.

    Preference order: the first listed domain whose entry at index 2
    (pathogenic frequency) is zero, then the first listed domain known in
    `domain_info` at all, then a default "no domain" feature tuple.
    """
    domains = row['pfam'].split(',')
    zero_freq = [domain_info[d] for d in domains
                 if d in domain_info and domain_info[d][2] == 0]
    if zero_freq:
        return zero_freq[0]
    for d in domains:
        if d in domain_info:
            return domain_info[d]
    return (0, 0,
            0, 0, -1, 0,
            0, 0, -1, 0,
            -1, 1)
ff.loc[:, 'path_na'] = ff.apply(lambda row: 1 if row['path_frac']==-1 else 0, axis=1)
domain_info = {pfam:[path_frac, size,
path_freq, path_avg, path_ent, pc,
b_freq, b_avg, b_ent, bc,
fr, path_na]
for pfam, path_frac, size, path_freq, path_avg, path_ent, pc, b_freq, b_avg, b_ent, bc, fr, path_na
in ff.values}
df_pre.loc[:, 'path_frac_t'] = df_pre.apply(lambda row: match(row, domain_info)[0], axis=1)
df_pre.loc[:, 'size_t'] = df_pre.apply(lambda row: match(row, domain_info)[1], axis=1)
df_pre.loc[:, 'path_na_t'] = df_pre.apply(lambda row: match(row, domain_info)[-1], axis=1)
df_pre.loc[:, 'in_none_pfam'] = df_pre.apply(lambda row: 1 if 'none' in row['pfam'] else 0, axis=1)
# use patient counts
df_pre.loc[:, 'path_freq'] = df_pre.apply(lambda row: match(row, domain_info)[2], axis=1)
df_pre.loc[:, 'path_avg'] = df_pre.apply(lambda row: match(row, domain_info)[3], axis=1)
df_pre.loc[:, 'path_ent'] = df_pre.apply(lambda row: match(row, domain_info)[4], axis=1)
df_pre.loc[:, 'path_cnt'] = df_pre.apply(lambda row: match(row, domain_info)[5], axis=1)
df_pre.loc[:, 'benign_freq'] = df_pre.apply(lambda row: match(row, domain_info)[6], axis=1)
df_pre.loc[:, 'benign_avg'] = df_pre.apply(lambda row: match(row, domain_info)[7], axis=1)
df_pre.loc[:, 'benign_ent'] = df_pre.apply(lambda row: match(row, domain_info)[8], axis=1)
df_pre.loc[:, 'benign_cnt'] = df_pre.apply(lambda row: match(row, domain_info)[9], axis=1)
df_pre.loc[:, 'path_benign_freq_r'] = df_pre.apply(lambda row: match(row, domain_info)[10], axis=1)
#df_pre.loc[:, 'path_na_t'] = df_pre.apply(lambda row: match(row, domain_info)[2], axis=1)
# this is for training
# use not just missense
# I do not need to require an mpc score here anymore (df_pre.mpc>0)
df_x_pre = df_pre[ (df_pre.clin_class != 'VUS') ]
df_s = df_x_pre.groupby('pfam').size().reset_index()
multi_pfam = set( df_s[df_s[0]>1]['pfam'].values )
df_x_pre.loc[:, 'multi_pfam'] = df_x_pre.apply(lambda row: row['pfam'] in multi_pfam, axis=1)
df_x = df_x_pre[ (df_x_pre.multi_pfam) & (df_x_pre.eff=='missense_variant') & (df_x_pre.mpc>0)]
df_x.loc[:, 'y'] = df_x.apply(lambda row: 1 if row['clin_class'] in ('PATHOGENIC', 'LIKLEY_PATHOGENIC')
else 0, axis=1)
df_x.head()
train_keys = {':'.join([str(x) for x in v]):True for v in df_x[['chrom', 'pos', 'ref', 'alt']].values}
print(len(train_keys))
hash={'LIKELY_BENIGN':'Benign',
'BENIGN':'Benign',
'PATHOGENIC':'Pathogenic',
'LIKLEY_PATHOGENIC':'Pathogenic'
}
df_x.loc[:, 'plot_class'] = df_x.apply(lambda row: hash[row['clin_class']], axis=1)
flatui = ["#e74c3c", "#2ecc71"]
sns.set(font_scale=3)
ax = sns.countplot(x="plot_class", data=df_x, palette=sns.color_palette(flatui))
ax.set_ylabel('Missense variant count')
ax.set_xlabel('')
ax.set_title('GeneDx training data')
plt.xticks(rotation=45)
#ax.set_xticklabels(rotation=30)
clin_file = '../data/interim/denovo/denovo.dat'
clinvar_df_pre = pandas.read_csv(clin_file, sep='\t').fillna(0)
focus_gene_ls = ('SCN1A','SCN2A','KCNQ2', 'KCNQ3', 'CDKL5', 'PCDH19', 'SCN1B', 'SCN8A', 'SLC2A1', 'SPTAN1', 'STXBP1', 'TSC1')
clinvar_df_pre.loc[:, "key"] = clinvar_df_pre.apply(lambda row: ':'.join([str(row[x]) for x in ['chrom', 'pos', 'ref', 'alt']]), axis=1)
clinvar_df_pre.loc[:, "not_in_training"] = clinvar_df_pre.apply(lambda row: not row['key'] in train_keys, axis=1)
clinvar_df_pre.loc[:, "is_focus"] = clinvar_df_pre.apply(lambda row: row['gene'] in focus_gene_ls, axis=1)
clinvar_df = clinvar_df_pre[(clinvar_df_pre.eff=='missense_variant')
& (clinvar_df_pre.not_in_training)
& (clinvar_df_pre.mpc>0)
& (clinvar_df_pre.is_focus)].drop_duplicates()
clinvar_df.loc[:, 'path_frac_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[0], axis=1)
clinvar_df.loc[:, 'size_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[1], axis=1)
clinvar_df.loc[:, 'path_freq'] = clinvar_df.apply(lambda row: match(row, domain_info)[2], axis=1)
clinvar_df.loc[:, 'path_avg'] = clinvar_df.apply(lambda row: match(row, domain_info)[3], axis=1)
clinvar_df.loc[:, 'path_ent'] = clinvar_df.apply(lambda row: match(row, domain_info)[4], axis=1)
clinvar_df.loc[:, 'path_cnt'] = clinvar_df.apply(lambda row: match(row, domain_info)[5], axis=1)
clinvar_df.loc[:, 'benign_freq'] = clinvar_df.apply(lambda row: match(row, domain_info)[6], axis=1)
clinvar_df.loc[:, 'benign_avg'] = clinvar_df.apply(lambda row: match(row, domain_info)[7], axis=1)
clinvar_df.loc[:, 'benign_ent'] = clinvar_df.apply(lambda row: match(row, domain_info)[8], axis=1)
clinvar_df.loc[:, 'benign_cnt'] = clinvar_df.apply(lambda row: match(row, domain_info)[9], axis=1)
clinvar_df.loc[:, 'path_benign_freq_r'] = clinvar_df.apply(lambda row: match(row, domain_info)[10], axis=1)
clinvar_df.loc[:, 'path_na_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[-1], axis=1)
clinvar_df.loc[:, 'in_none_pfam'] = clinvar_df.apply(lambda row: 1 if 'none' in row['pfam'] else 0, axis=1)
# need a smarter match to domain here
#m = pandas.merge(clinvar_df, ff, on='pfam', how='left')
#m.head()
print(len(clinvar_df_pre))
print(len(clinvar_df))
clinvar_df.sort_values(by='af_1kg_all', ascending=False)
def eval_pred(row, col):
    """Bucket a prediction score: >0.9 -> 'right', <0.1 -> 'wrong', else 'vus'."""
    score = row[col]
    if score > 0.9:
        return 'right'
    return 'wrong' if score < 0.1 else 'vus'
# train new tree and apply to clinvar
forest = ExtraTreesClassifier(n_estimators=300,
random_state=13,
bootstrap=True,
max_features=7,
min_samples_split=2,
max_depth=8,
min_samples_leaf=5,
n_jobs=4)
#tree_clf = linear_model.LogisticRegression(penalty='l1', fit_intercept=True)
#poly = PolynomialFeatures(degree=6, interaction_only=False, include_bias=False)
all_preds = []
all_truth = []
#
cols = ['mpc', 'size_t', 'path_frac_t', 'in_none_pfam',
'path_freq', 'path_avg', 'path_ent', 'path_cnt',
'benign_freq', 'benign_avg', 'benign_ent', 'benign_cnt',
'af_1kg_all', 'mtr', 'path_benign_freq_r']
X, y = df_x[cols], df_x['y']
forest.fit(X, y)
#tree_clf.fit(X, y)
X_clin = clinvar_df[cols]
preds = forest.predict(X_clin)
clinvar_df['tree_pred'] = preds
clinvar_df.loc[:, 'PredictionStatus'] = clinvar_df.apply(lambda row: eval_pred(row, 'tree_pred'), axis=1)
import collections
print( 'scores', collections.Counter(clinvar_df['PredictionStatus']) )
importances = forest.feature_importances_
std = numpy.std([atree.feature_importances_ for atree in forest.estimators_],
axis=0)
indices = numpy.argsort(importances)[::-1]
# Print the feature ranking
feature_ls = []
print("Feature ranking:")
for f in range(X.shape[1]):
ls = (cols[indices[f]],
f + 1, indices[f],
importances[indices[f]])
print("%s, %d. feature %d (%f)" % ls)
feature_ls.append([ls[0], ls[-1]])
fhash={'mpc':'MPC',
'size_t':'Domain GeneDx var count',
'path_na_t':'No variants',
'path_frac_t':'Domain fraction of pathogenic GeneDx vars',
'in_none_pfam':'Outside Pfam domain flag',
'path_freq':'Domain pathogenic GeneDx freq',
'path_avg':'Domain avg pathogenic GeneDx freq',
'path_ent':'Entropy of domain pathogenic GeneDx freq',
'path_cnt':'Domain pathogenic var GeneDx count',
'benign_freq':'Domain benign GeneDx freq',
'benign_avg':'Domain avg benign GeneDx freq',
'benign_ent':'Entropy of domain benign GeneDx freq',
'benign_cnt':'Domain benign var GeneDx count',
'af_1kg_all':'1KG var freq',
'mtr':'MTR',
'path_benign_freq_r':'Ratio of domain benign:pathogenic GeneDx freqs'}
feature_df = pandas.DataFrame({'feature':[fhash[x[0]] for x in feature_ls], 'importance':[x[1] for x in feature_ls]})
ax = sns.barplot(data=feature_df, x='feature', y='importance', palette="Greens")
ax.set_ylabel('Feature importance')
ax.set_xlabel('')
#ax.set_title('ClinVar subset (w/o GeneDx) testing data')
plt.xticks(rotation=90)
#plt.rcParams['figure.figsize'] = 20, 6
#plt.figure(figsize=(40,6))
#f, ax = plt.subplots(figsize=(40,6))
#sns.set_context("talk")
# Per-gene counts of prediction outcomes for the domain-burden + MPC model.
g_df = (clinvar_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatus']]
        .groupby(['gene', 'PredictionStatus'])
        .size().reset_index().rename(columns={0: 'size'}))
# Keep only genes with at least one evaluated variant.
dd = g_df.groupby('gene').sum().reset_index()
use_genes = set(dd[dd['size'] > 0]['gene'].values)
g_df.loc[:, 'keep'] = g_df.apply(lambda row: row['gene'] in use_genes, axis=1)
sns.set(font_scale=1.75)
flatui = ["#2ecc71", "#3498db", "#e74c3c",]
ss = sns.factorplot(x='gene', hue='PredictionStatus', y='size', data=g_df[g_df['keep']],
                    kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3)
ss.set_ylabels('DenovoDb missense variants')
ss.set_xlabels('')
# Fixed typo in the displayed plot title: 'Dominan' -> 'Dominant'.
ss.set_titles('Dominant burden + MPC performance')
ss.savefig("../docs/plots/denovo_burden_gene_eval.png")
# train new tree and apply to clinvar: just pathogenic frac
tree_clf = tree.DecisionTreeClassifier(max_depth=1)
all_preds = []
all_truth = []
cols = ['mpc', 'size_t', 'path_na_t', 'path_frac_t']
X, y = df_x[cols], df_x['y']
tree_clf.fit(X, y)
all_preds = []
all_truth = []
cols = ['mpc']
X, y = df_x[cols], df_x['y'] #X, y = df_x[cols], df_x['y']
tree_clf.fit(X, y)
X_clin = clinvar_df[cols]
preds = tree_clf.predict(X_clin)
clinvar_df['mpc_pred'] = preds
clinvar_df.loc[:, 'PredictionStatusMPC'] = clinvar_df.apply(lambda row: eval_pred(row, 'mpc_pred'), axis=1)
g_df = (clinvar_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC']]
.groupby(['gene','PredictionStatusMPC'])
.size().reset_index().rename(columns={0:'size'}))
dd = g_df.groupby('gene').sum().reset_index()
use_genes = set(dd[dd['size']>0]['gene'].values)
g_df.loc[:, 'keep'] = g_df.apply(lambda row: row['gene'] in use_genes, axis=1)
sns.set(font_scale=1.75)
flatui = ["#2ecc71", "#3498db", "#e74c3c",]
ss = sns.factorplot(x='gene', hue='PredictionStatusMPC', y='size', data=g_df[g_df['keep']],
kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3)
ss.set_ylabels('DenovoDb missense variants')
ss.set_xlabels('')
ss.set_titles('MPC performance')
ss.savefig("../docs/plots/denovo_mpc_eval.png")
scores = clinvar_df['mpc'].values
truth = clinvar_df['y'].values
fpr_mpc, tpr_mpc, _ = metrics.roc_curve(truth, scores, pos_label=1)
mpc_auc = metrics.auc(fpr_mpc, tpr_mpc)
import collections
print( 'mpc', collections.Counter(clinvar_df['PredictionStatusMPC']) )
print( 'new', collections.Counter(clinvar_df['PredictionStatus']) )
clinvar_df[clinvar_df.gene=='TSC1']
clinvar_df[clinvar_df.gene=='SPTAN1']
clinvar_df[clinvar_df.gene=='TSC1']
```
| github_jupyter |
```
import interfaces._pyscf as pscf
import ase.io as aio
import numpy as np
import numpy.linalg as LA
import os,sys
import aqml.cheminfo.lo.dm as cld
np.set_printoptions(precision=4,suppress=True)
def normalize(_vs, _signs=None):
    """Return a 2-D array of the input vectors scaled to unit length.

    When `_signs` is given, the i-th unit vector is additionally
    multiplied by `_signs[i]` (e.g. +/-1 to fix its orientation).
    """
    units = []
    for pos, vec in enumerate(_vs):
        unit = np.array(vec) / LA.norm(vec)
        if _signs is not None:
            unit = unit * _signs[pos]
        units.append(unit)
    return np.array(units)
def get_hyb_map(vsp1, vsb1):
    """For each vector in `vsp1`, find the vector in `vsb1` with the
    largest dot product.

    Returns (indices into vsb1, matching dot-product values), one entry
    per vector of `vsp1`. Also prints the raw overlap values per row
    (debug output preserved from the original implementation).
    """
    idx, vals = [], []
    for vp1 in vsp1:
        _vals = [np.dot(vp1, vb1) for vb1 in vsb1]
        seq = np.argsort(_vals)
        best = seq[-1]
        print( ' _vals = ', _vals)
        idx.append(best)
        vals.append(_vals[best])
    return idx, vals
def get_dm_obj(obj, basis='sto-3g', meth='b3lyp', idx=None, idx2=None, iprt=False):
    """Build a density-matrix object for a molecule.

    Parameters
    ----------
    obj : str or Atoms-like
        Path to a structure file (read via ase.io) or an object exposing
        `numbers` and `positions`.
    basis, meth : str
        Basis set and method forwarded to cld.density_matrix.
    idx, idx2 : optional
        Atom-index selections forwarded to calc_ca_dm.
    iprt : bool
        Verbose-printing flag for the density-matrix code.

    Returns
    -------
    The cld.density_matrix object after calc_ca_dm has been run.
    """
    # Removed dead code: unused local `a = 0.` and a stale commented-out line.
    spin = 0
    verbose = 3
    if isinstance(obj, str):
        # NOTE(review): assert is stripped under `python -O`; raising
        # FileNotFoundError would be sturdier, but kept as-is so callers
        # catching AssertionError still work.
        assert os.path.exists(obj)
        m = aio.read(obj)
    else:
        m = obj
    zs, coords = m.numbers, m.positions
    obj2 = cld.density_matrix(zs, coords, output=None, basis=basis, meth=meth,
                              spin=spin, verbose=verbose, iprt=iprt)
    obj2.calc_ca_dm(idx=idx, idx2=idx2)
    return obj2
def print_dm(obj, ia, ja, bst='sto-3g'):
    """Print the density-matrix block coupling atoms `ia` and `ja`.

    Assumes an STO-3G layout where each heavy atom (Z > 1) carries 5 AOs
    stored first, followed by one AO per hydrogen; heavy atoms are
    presumably listed before hydrogens in `obj.zs` -- TODO confirm.
    """
    assert bst=='sto-3g'
    zs = obj.zs
    nheav = (np.array(zs) > 1).sum()
    nao_heavy = nheav * 5

    def ao_range(atom):
        # AO slice for one atom: 5 consecutive AOs for a heavy atom,
        # a single AO (placed after all heavy-atom AOs) for a hydrogen.
        if atom <= nheav - 1:
            return [atom * 5, (atom + 1) * 5]
        return [nao_heavy + (atom - nheav), nao_heavy + (atom - nheav) + 1]

    rows, cols = ao_range(ia), ao_range(ja)
    print( obj.dm1[rows[0]:rows[1]][:,cols[0]:cols[1]].T)
m1 = aio.read('ch4.xyz')
#av.view(m2)
c1 = get_dm_obj(m1)
oo = pscf.io(c1.m)
orig, cell, dt = oo.orbital(c1.B, grids=[100,100,100], label='ch4')
c1.B[:5,:5]
from ase.io.cube import read_cube_data
data, atoms = read_cube_data('ch4_1.cube')
from mayavi import mlab
mlab.init_notebook()
mlab.figure()
#mlab.test_contour3d()
#atoms = m1; data = dt[0]; _contours = 5
from mayavi import mlab
import importlib
importlib.reload(mlab)
mlab.init_notebook()
_atoms = mlab.draw_molecule(atoms)
_atoms[0]
_atoms[1]
from mayavi import mlab
mlab.init_notebook()
mlab.test_molecule()
source = mlab.pipeline.scalar_field(data)
min = data.min()
max = data.max()
vol = mlab.pipeline.volume(source, vmin=min + 0.65 * (max - min),
vmax=min + 0.9 * (max - min))
mlab.view() #132, 54, 45, [21, 20, 21.5])
mlab.show()
from mayavi import mlab
mlab.init_notebook()
mlab.figure()
data, atoms = read_cube_data('ch4_1.cube')
mols = mlab.contour3d(data)
T,F=True,False
mlab.figure()
_atoms[0]
mlab.contour3d(data, contours=[0.07], opacity=0.5, color=(1,0,0)) #transparent=F, colormap='hot')
mlab.contour3d(data, contours=[-0.07], opacity=0.5, color=(0,1,0)) # transparent=F, colormap='hot')
_atoms[0]
o = [[30, 62, 19], [8, 21, 10]]
ox, oy, oz = list(map(np.array, zip(*o)))
ox, oy, oz
mlab.figure()
_ats = mlab.test_molecule()
_ats[0]
orig, cell
```
| github_jupyter |
<a href="https://colab.research.google.com/github/truongkhanhduy95/MIT-Deep-Learning/blob/master/deep_learning_basics/deep_learning_basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Deep Learning Basics
This tutorial accompanies the [lecture on Deep Learning Basics](https://www.youtube.com/watch?list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf&v=O5xeyoRL95U) given as part of [MIT Deep Learning](https://deeplearning.mit.edu). Acknowledgement to amazing people involved is provided throughout the tutorial and at the end. You can watch the video on YouTube:
[](https://www.youtube.com/watch?list=PLrAXtmErZgOeiKm4sgNOknGvNjby9efdf&v=O5xeyoRL95U)
In this tutorial, we mention seven important types/concepts/approaches in deep learning, introducing the first 2 and providing pointers to tutorials on the others. Here is a visual representation of the seven:

At a high-level, neural networks are either encoders, decoders, or a combination of both. Encoders find patterns in raw data to form compact, useful representations. Decoders generate new data or high-resolution useful information from those representations. As the lecture describes, deep learning discovers ways to **represent** the world so that we can reason about it. The rest is clever methods that help us deal effectively with visual information, language, sound (#1-6) and even act in a world based on this information and occasional rewards (#7).
1. **Feed Forward Neural Networks (FFNNs)** - classification and regression based on features. See [Part 1](#Part-1:-Boston-Housing-Price-Prediction-with-Feed-Forward-Neural-Networks) of this tutorial for an example.
2. **Convolutional Neural Networks (CNNs)** - image classification, object detection, video action recognition, etc. See [Part 2](#Part-2:-Classification-of-MNIST-Dreams-with-Convolution-Neural-Networks) of this tutorial for an example.
3. **Recurrent Neural Networks (RNNs)** - language modeling, speech recognition/generation, etc. See [this TF tutorial on text generation](https://www.tensorflow.org/tutorials/sequences/text_generation) for an example.
4. **Encoder Decoder Architectures** - semantic segmentation, machine translation, etc. See [our tutorial on semantic segmentation](https://github.com/lexfridman/mit-deep-learning/blob/master/tutorial_driving_scene_segmentation/tutorial_driving_scene_segmentation.ipynb) for an example.
5. **Autoencoder** - unsupervised embeddings, denoising, etc.
6. **Generative Adversarial Networks (GANs)** - unsupervised generation of realistic images, etc. See [this TF tutorial on DCGANs](https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb) for an example.
7. **Deep Reinforcement Learning** - game playing, robotics in simulation, self-play, neural architecture search, etc. We'll be releasing notebooks on this soon and will link them here.
There are selective omissions and simplifications throughout these tutorials, hopefully without losing the essence of the underlying ideas. See Einstein quote...
## Part 0: Prerequisites:
We recommend that you run this notebook in the cloud on Google Colab (see link with icon at the top) if you're not already doing so. It's the simplest way to get started. You can also [install TensorFlow locally](https://www.tensorflow.org/install/). But, again, simple is best (with caveats):

[tf.keras](https://www.tensorflow.org/guide/keras) is the simplest way to build and train neural network models in TensorFlow. So, that's what we'll stick with in this tutorial, unless the models necessitate a lower-level API.
Note that there's [tf.keras](https://www.tensorflow.org/guide/keras) (comes with TensorFlow) and there's [Keras](https://keras.io/) (standalone). You should be using [tf.keras](https://www.tensorflow.org/guide/keras) because (1) it comes with TensorFlow so you don't need to install anything extra and (2) it comes with powerful TensorFlow-specific features.
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Commonly used modules
import numpy as np
import os
import sys
# Images, plots, display, and visualization
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import cv2
import IPython
# Scikit-learn
from sklearn.preprocessing import StandardScaler, MinMaxScaler
print(tf.__version__)
```
## Part 1: Boston Housing Price Prediction with Feed Forward Neural Networks
Let's start with using a fully-connected neural network to predict housing prices. The following image highlights the difference between regression and classification (see part 2). Given an observation as input, **regression** outputs a continuous value (e.g., exact temperature) and **classification** outputs a class/category that the observation belongs to.
<img src="https://i.imgur.com/vvSoAzg.jpg" alt="classification_regression" width="400"/>
For the Boston housing dataset, we get 506 rows of data, with 13 features in each. Our task is to build a regression model that takes these 13 features as input and output a single value prediction of the "median value of owner-occupied homes (in $1000)."
Now, we load the dataset. Loading the dataset returns four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.
* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.
```
# Load the Boston housing data; returns (train, test) splits of 13 features each.
(X_train, y_train), (X_test, y_test) = keras.datasets.boston_housing.load_data()
#Zscore normalization
# Fit the scaler on the training set only; the test set is transformed later
# with the same statistics to avoid information leakage.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
```
### Build the model
Building the neural network requires configuring the layers of the model, then compiling the model. First we stack a few layers together using `keras.Sequential`. Next we configure the loss function, optimizer, and metrics to monitor. These are added during the model's compile step:
* *Loss function* - measures how accurate the model is during training, we want to minimize this with the optimizer.
* *Optimizer* - how the model is updated based on the data it sees and its loss function.
* *Metrics* - used to monitor the training and testing steps.
Let's build a network with 1 hidden layer of 20 neurons, and use mean squared error (MSE) as the loss function (most common one for regression problems):
```
def build_model():
    """Build and compile a feed-forward regression network.

    Architecture: one hidden layer of 20 ReLU units followed by a single
    linear output unit (predicted median home value, in $1000s). Reads the
    module-level ``X_train`` to size the input layer.

    Returns:
        A compiled ``keras.Sequential`` model with MSE loss and MAE/MSE metrics.
    """
    model = keras.Sequential([
        keras.layers.Dense(20, activation=tf.nn.relu, input_shape=[len(X_train[0])]),
        keras.layers.Dense(1)  # linear output for regression
    ])
    # BUG FIX: tf.train.AdamOptimizer is the TF1 API and was removed in TF2;
    # tf.keras.optimizers.Adam is the tf.keras-native equivalent.
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='mse',
                  metrics=['mae', 'mse'])
    return model
```
### Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model—in this example, the `train_features` and `train_labels` arrays.
2. The model learns to associate features and labels.
3. We ask the model to make predictions about a test set—in this example, the `test_features` array. We verify that the predictions match the labels from the `test_labels` array.
To start training, call the `model.fit` method—the model is "fit" to the training data:
```
# this helps makes our output less verbose but still shows progress
# Prints one '.' per epoch and starts a new line every 100 epochs.
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
model = build_model()
# Stop training once the validation loss has not improved for 50 epochs.
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
history = model.fit(X_train, y_train, epochs=1000, verbose=0, validation_split = 0.1, callbacks=[early_stop, PrintDot()])
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
# Show RMSE measure
# NOTE(review): in TF2 this history key is usually 'val_mse' when the metric
# is given as 'mse' -- confirm the key name for the TF version in use.
rmse_final = np.sqrt(float(hist['val_mean_squared_error'].tail(1)))
print()
print('Final Root Mean Square Error on validation set: {}'.format(round(rmse_final, 3)))
```
Now, let's plot the loss function measure on the training and validation sets. The validation set is used to prevent overfitting ([learn more about it here](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit)). However, because our network is small, the training converges without noticeably overfitting the data, as the plot shows.
```
def plot_history(hist):
    """Plot training vs. validation MSE per epoch.

    Args:
        hist: DataFrame built from ``history.history`` with an added
            'epoch' column.
    """
    plt.figure()
    plt.xlabel('Epoch')
    # BUG FIX: the axis label contained typos ("Thoursand Dollart").
    plt.ylabel('Mean Squared Error [Thousand Dollars$^2$]')
    # NOTE(review): in TF2 these history keys are usually 'mse'/'val_mse'
    # when the metric is given as 'mse' -- confirm for the TF version in use.
    plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error')
    plt.plot(hist['epoch'], hist['val_mean_squared_error'], label='Val Error')
    plt.legend()
    plt.ylim([0, 50])
plot_history(hist)
```
Next, compare how the model performs on the test dataset:
```
# Apply the training-set scaling statistics to the test features.
X_test = scaler.transform(X_test)
# evaluate() returns [loss, mae, mse]; the loss is MSE here, so the first
# value is the test MSE.
mse, _, _ = model.evaluate(X_test, y_test)
rmse = np.sqrt(mse)
print('Root Mean Square Error on test set: {}'.format(round(rmse, 3)))
```
## Part 2: Classification of MNIST Dreams with Convolutional Neural Networks
Next, let's build a convolutional neural network (CNN) classifier to classify images of handwritten digits in the MNIST dataset with a twist where we test our classifier on high-resolution hand-written digits from outside the dataset.
The MNIST dataset contains 70,000 grayscale images of handwritten digits at a resolution of 28 by 28 pixels. The task is to take one of these images as input and predict the most likely digit contained in the image (along with a relative confidence in this prediction):
<img src="https://i.imgur.com/ITrm9x4.png" width="500px">
Now, we load the dataset. The images are 28x28 NumPy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9.
```
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
```
We scale these values to a range of 0 to 1 before feeding to the neural network model. For this, we divide the values by 255. It's important that the *training set* and the *testing set* are preprocessed in the same way:
```
def preprocess_images(imgs):
    """Scale 8-bit pixel values from [0, 255] down to [0.0, 1.0].

    Accepts either a single 28x28 image or a batch of shape (N, 28, 28);
    asserts the 28x28 resolution before scaling.
    """
    if imgs.ndim == 2:
        sample_img = imgs
    else:
        sample_img = imgs[0]
    assert sample_img.shape == (28, 28), sample_img.shape  # make sure images are 28x28
    return imgs / 255.0
# Apply the same scaling to both splits so train/test stay comparable.
X_train = preprocess_images(X_train)
X_test = preprocess_images(X_test)
```
Display the first 5 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
```
# Show the first 5 training digits with their labels (ticks/grid hidden).
plt.figure(figsize=(10,2))
for i in range(5):
plt.subplot(1,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(X_train[i], cmap=plt.cm.binary)
plt.xlabel(y_train[i])
def build_model():
    """Build a simple fully-connected MNIST classifier.

    Returns:
        An (uncompiled) ``keras.Sequential`` model mapping a 28x28 image to
        10 softmax class scores.
    """
    model = keras.Sequential([
        # BUG FIX: the original wrote `input_shape(28, 28)`, calling
        # input_shape as a function (a runtime error); it must be passed
        # as a keyword argument.
        keras.layers.Flatten(input_shape=(28, 28)),  # flatten image to a 1d-array
        keras.layers.Dense(128, activation=tf.nn.relu),  # fully connected layer (128 nodes)
        keras.layers.Dense(10, activation=tf.nn.softmax)  # softmax layer returns an array of 10 probability scores that sum to 1
    ])
    return model
```
Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
* *Loss function* - measures how accurate the model is during training, we want to minimize this with the optimizer.
* *Optimizer* - how the model is updated based on the data it sees and its loss function.
* *Metrics* - used to monitor the training and testing steps. "accuracy" is the fraction of images that are correctly classified.
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 98.6% on the training data.
### Make predictions
With the model trained, we can use it to make predictions about some images. Let's step outside the MNIST dataset for that and go with the beautiful high-resolution images generated by a mixture of CPPN, GAN, VAE. See [great blog post by hardmaru](http://blog.otoro.net/2016/04/01/generating-large-images-from-latent-vectors/) for the source data and a description of how these morphed animations are generated:

```
# Paths for the input "MNIST dream" video and the annotated output video.
mnist_dream_path = 'images/mnist_dream.mp4'
mnist_prediction_path = 'images/mnist_dream_predicted.mp4'
# download the video if running in Colab
if not os.path.isfile(mnist_dream_path):
print('downloading the sample video...')
#mnist_dream_path = urllib.request.urlretrieve(os.path.join(deep_repo_url, 'mit_driveseg_sample.mp4')[0]
def cv2_imshow(img):
"""Display an OpenCV image inline by PNG-encoding it for IPython."""
_, ret = cv2.imencode('.png', img)
img_ip = IPython.display.Image(data=ret)
IPython.display.display(img_ip)
cap = cv2.VideoCapture(mnist_dream_path)
vw = None
frame = -1 # counter for debugging (mostly), 0-indexed
# go through all the frames and run our classifier on the high res MNIST images as they morph from number to number
while True: # should 481 frames
frame += 1
ret, img = cap.read()
if not ret: break
assert img.shape[0] == img.shape[1] # should be a square
if img.shape[0] != 720:
img = cv2.resize(img, (720, 720))
#preprocess the image for prediction
img_proc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_proc = cv2.resize(img_proc, (28, 28))
img_proc = preprocess_images(img_proc)
img_proc = 1 - img_proc # inverse since training dataset is white text with black background
net_in = np.expand_dims(img_proc, axis=0) # expand dimension to specify batch size of 1
# NOTE(review): build_model() creates a FRESH, untrained (and uncompiled)
# model on every frame, so these predictions are from random weights --
# presumably the trained model from the earlier cell was intended; confirm.
model = build_model()
preds = model.predict(net_in)[0]
guess = np.argmax(preds)
perc = np.rint(preds * 100).astype(int)
# Invert colors and pad the frame to 1280x720 to leave room for the bars.
img = 255 - img
pad_color = 0
img = np.pad(img, ((0,0), (0,1280-720), (0,0)), mode='constant', constant_values=(pad_color))
line_type = cv2.LINE_AA
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1.3
thickness = 2
x, y = 740, 60
color = (255, 255, 255)
text = "Neural Network Output:"
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
text = "Input:"
cv2.putText(img, text=text, org=(30, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
y = 130
# Draw one horizontal confidence bar per digit; highlight the argmax class.
for i, p in enumerate(perc):
if i == guess: color = (255, 218, 158)
else: color = (100, 100, 100)
rect_width = 0
if p > 0: rect_width = int(p * 3.3)
rect_start = 180
cv2.rectangle(img, (x+rect_start, y-5), (x+rect_start+rect_width, y-20), color, -1)
text = '{}: {:>3}%'.format(i, int(p))
cv2.putText(img, text=text, org=(x, y), fontScale=font_scale, fontFace=font_face, thickness=thickness,
color=color, lineType=line_type)
y += 60
# if you don't want to save the output as a video, set this to False
save_video = True
if save_video:
if vw is None:
codec = cv2.VideoWriter_fourcc(*'DIVX')
vid_width_height = img.shape[1], img.shape[0]
vw = cv2.VideoWriter(mnist_prediction_path, codec, 30, vid_width_height)
# 15 fps above doesn't work robustly so we write the frame twice at 30 fps
vw.write(img)
vw.write(img)
# scale down image for display
img_disp = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
cv2_imshow(img_disp)
IPython.display.clear_output(wait=True)
cap.release()
if vw is not None:
vw.release()
```
The above shows the prediction of the network by choosing the neuron with the highest output. While the output layer values sum to one, they do not reflect well-calibrated measures of "uncertainty". Often, the network is overly confident about the top choice, which does not reflect a learned measure of probability. If everything ran correctly you should get an animation like this:

## Acknowledgements
The contents of this tutorial is based on and inspired by the work of [TensorFlow team](https://www.tensorflow.org) (see their [Colab notebooks](https://www.tensorflow.org/tutorials/)), our [MIT Human-Centered AI team](https://hcai.mit.edu), and individual pieces referenced in the [MIT Deep Learning](https://deeplearning.mit.edu) course slides.
```
```
| github_jupyter |
<img src="../../../img/logo-bdc.png" align="right" width="64"/>
# <span style="color: #336699">Land use and land cover classification in the Brazilian Cerrado biome using Brazil Data Cube</span>
<hr style="border:2px solid #0077b9;">
<br/>
<div style="text-align: center;font-size: 90%;">
Rolf E. O. Simões <sup><a href="mailto:rolf.simoes@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-0953-4132"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Alber H. Sanchez <sup><a href="mailto:alber.ipia@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7966-2880"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Felipe M. Carlos <sup><a href="mailto:felipe.carlos@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3334-4315"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Leonardo S. Vieira <sup><a href="mailto:leonardo.vieira@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0002-3397-6232"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>,<br/>
Karine R. Ferreira <sup><a href="mailto:karine.ferreira@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-2656-5504"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Lubia Vinhas <sup><a href="mailto:lubia.vinhas@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-1104-3607"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Gilberto R. Queiroz<sup>* <a href="mailto:gilberto.queiroz@inpe.br"><i class="far fa-lg fa-envelope"></i></a> <a href="https://orcid.org/0000-0001-7534-0219"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>
<br/><br/>
Earth Observation and Geoinformatics Division, National Institute for Space Research (INPE)
<br/>
Avenida dos Astronautas, 1758, Jardim da Granja, São José dos Campos, SP 12227-010, Brazil
<br/><br/>
<sup>*</sup> Author to whom correspondence should be addressed.
<br/><br/>
February 24, 2021
</div>
<br/>
<div style="text-align: justify; margin-left: 10%; margin-right: 10%;">
<b>Abstract.</b> This Jupyter Notebook compendium contains useful information for the creation of land use and land cover (LULC) maps using Earth observations data cubes and machine learning (ML) techniques. The code is based on the research pipeline described in the paper <em>Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products</em>. These notebooks access open data available in the Brazil Data Cube platform.
</div>
<br/>
<div style="text-align: justify; margin-left: 15%; margin-right: 15%;font-size: 75%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;">
<b>This Jupyter Notebook is supplement to the <a href="https://www.mdpi.com/2072-4292/12/24/4033/htm#sec5-remotesensing-12-04033" target="_blank">Section 5</a> of the following paper:</b>
<div style="margin-left: 10px; margin-right: 10px">
Ferreira, K.R.; Queiroz, G.R.; Vinhas, L.; Marujo, R.F.B.; Simoes, R.E.O.; Picoli, M.C.A.; Camara, G.; Cartaxo, R.; Gomes, V.C.F.; Santos, L.A.; Sanchez, A.H.; Arcanjo, J.S.; Fronza, J.G.; Noronha, C.A.; Costa, R.W.; Zaglia, M.C.; Zioti, F.; Korting, T.S.; Soares, A.R.; Chaves, M.E.D.; Fonseca, L.M.G. 2020. Earth Observation Data Cubes for Brazil: Requirements, Methodology and Products. Remote Sens. 12, no. 24: 4033. DOI: <a href="https://doi.org/10.3390/rs12244033" target="_blank">10.3390/rs12244033</a>.
</div>
</div>
# <span style="color: #336699">Validation results for Sentinel-2/MSI classification</span>
<hr style="border:1px solid #0077b9;">
This document presents the process for the validation of the results generated in the classification made using the Sentinel-2/MSI data cube. In this example, the validation process was done using the [SITS R package](https://github.com/e-sensing/sits).
```
library(sf)
library(sits)
library(raster)
```
## <span style="color: #336699">Load data</span>
To perform the validation, it is initially necessary to load the classification results and the samples used as a reference.
```
# Load classification results
# Classified map produced from the S2_10_16D_STK_1 data cube.
output_dir <- paste0(path.expand('~/work'), "/bdc-article", "/results", "/S2_10_16D_STK_1")
classification_results <- raster::raster(
paste0(output_dir, "/cube_to_classify_merged_probs_class_2018_8_2019_7_v1.tif")
)
# Load classification reference
# Validation samples with a `geom` (locations) and a `reference` (labels) field.
validation_samples <- readRDS(url("https://brazildatacube.dpi.inpe.br/geo-knowledge-hub/bdc-article/validation-samples/validation-samples.rds"))
```
## <span style="color: #336699">Extract predicted values</span>
With the rasters and reference samples loaded, it will now be necessary to extract the values corresponding to each of the validation samples' locations from the classified raster. This extraction allows the comparison of the predicted and reference values.
> For extraction at each location, the `extract` function of the [raster package](https://cran.r-project.org/web/packages/raster/raster.pdf) is used.
```
# Extract the predicted class value at each validation-sample location.
predicted_values <- raster::extract(
x = classification_results,
y = validation_samples$geom
)
```
## <span style="color: #336699">Transform extracted values</span>
As described in the article, the classified elements were converted so that the evaluation process could be carried out. Here, the `Agriculture` and `Pasture` elements are unified to represent the `Anthropic` class of PRODES. The `Natural Vegetation` class is not changed for comparison.
> When the classification process is carried out, there is no way to specify the values assigned to each of the classes being identified. Thus, it may be necessary to check the classification values and their corresponding elements so that the values are used for comparison.
```
# Recode predicted classes to the PRODES scheme. Natural Vegetation (2) is
# first moved to a temporary value (5) so it is not clobbered by the
# remapping of classes 1 and 3 below.
predicted_values[predicted_values == 2] <- 5
# Pasture (1) and Agriculture (3) -> PRODES Anthropic class (2)
predicted_values[predicted_values == 1] <- 2
predicted_values[predicted_values == 3] <- 2
# Temporary value 5 -> Natural Vegetation (1)
predicted_values[predicted_values == 5] <- 1
```
## <span style="color: #336699">Evaluating</span>
```
# Compute the confusion matrix and accuracy measures comparing predictions
# against the reference labels.
sits_conf_matrix(list(
predicted = predicted_values,
reference = validation_samples$reference
))
```
| github_jupyter |
## Computing with Language: Statistics
```
import nltk
# make sure that NLTK language resources have been downloaded
# (see "NLTK Introduction" notebook)
from nltk.book import *
```
### [Word] Frequency Distributions
FreqDist is used to encode "frequency distributions", which count the number of times that each outcome of an experiment occurs.
* In case of text, its frequency distribution will contain counts of all tokens that appear in the text.
* Technically: FreqDist() creates a Python object (that holds information about a frequency distribution)
```
# frequency distribution of text1
fdist1 = FreqDist(text1)
print(fdist1)
```
**FreqDist** methods:
* freq(sample) - returns the relative frequency of "sample" (its count divided by the total number of samples in the FreqDist)
* hapaxes() - a list of samples that appear only once
* max() - the sample with the maximum number of occurrences
* plot() - plot a FreqDist chart
* pprint() - "pretty print" the first items of FreqDist
NLTK book: http://www.nltk.org/book/ch01.html#computing-with-language-simple-statistics
Full list of methods: http://www.nltk.org/api/nltk.html#nltk.probability.FreqDist
```
# print frequency distribution (top results)
fdist1.pprint()
# max()
# the single most frequent token
fdist1.max()
# freq()
# freq() returns a relative frequency (count / total number of tokens)
print("',' :", fdist1.freq(","))
print("whale:", fdist1.freq("whale"))
```
---
Information about Python dictionaries:
* ["Dictionaries and Structuring Data"](https://automatetheboringstuff.com/chapter5/)
```
# output of fdist1.pprint() looks like a Python "dictionary"
# can we look up its values by a given "key"?
fdist1["whale"]
# top 10 results (not that interesting for text)
# so nltk builds upon Counter type object
fdist1.most_common(10)
type(fdist1) #strongly suspect that theres is a Counter like dictionary lurking down there
%matplotlib inline
# plot the distribution
fdist1.plot(30)
# Plot the top-50 tokens as an interactive bar chart with plotly.
import plotly.graph_objects as go
tuples = fdist1.most_common(50)
x = [t[0] for t in tuples]
y = [t[1] for t in tuples]
# fig = go.Figure(data=go.Scatter(x=x, y=y))
fig = go.Figure(data=go.Bar(x=x, y=y))
fig.show()
# Same chart, but with punctuation tokens filtered out first.
import string
tuples = fdist1.most_common(100)
tuples = [t for t in tuples if t[0] not in string.punctuation] # so we do not want any punctuation in results
x = [t[0] for t in tuples]
y = [t[1] for t in tuples]
# fig = go.Figure(data=go.Scatter(x=x, y=y))
fig = go.Figure(data=go.Bar(x=x, y=y))
fig.show()
# most_common() returns a list -> we can "slice" it
my_list = fdist1.most_common(100)
# results 50 through 59
my_list[50:60]
# least common results (first 10 examples)
fdist1.hapaxes()[:10]
```
### Words can appear both in lowercase and Capitalized
Let's fix our FreqDist:
```
# need to "lowercase" the text before passing it to FreqDist
# - see example in https://www.nltk.org/api/nltk.html#nltk.probability.FreqDist
fdist2 = FreqDist(word.lower() for word in text1) # so we normalize to lowercase
# we're going through the list of tokens in text,
# - returning (generating) lowercase versions of these tokens
# - and passing the result to FreqDist
# initial:
print(fdist1.freq("whale"))
print(fdist1.freq("Whale"))
print()
# fixed:
print(fdist2.freq("whale"))
```
### Cleaning data: removing stopwords
NLTK contains a corpus of *stopwords* - high-frequency words like "the", "to" and "also" - that we may want to filter out of a document before further processing.
Stopwords usually have little lexical content, and their presence in a text fails to distinguish it from other texts.
https://www.nltk.org/book/ch02#wordlist-corpora
```
from nltk.corpus import stopwords
# English stopwords
stop_words = stopwords.words("english")
# peek at the first few stopwords
stop_words[:8]
# let's start with text1 in lowercase
# return a list
# containing "word.lower()"
# for every item (stored in variable "word")
# in resource "text1"
text = [word.lower() for word in text1]
text[:7]
```
**NLTK book: [4.2 Operating on Every Element](https://www.nltk.org/book/ch01#operating-on-every-element)**
This *pattern* – doing something (e.g. modifying) with every item in a sequence and returning a list of results – is called Python *list comprehension*:
`result_list = [item.do_something() for item in list]`
List comprehensions may also contain conditions (only items matching the condition will be included in the resulting list):
`result_list = [item.do_something() for item in list `**`if`**` condition]`
It is very useful for filtering and modifying lists.
```
# we can filter either (a) text before calling FreqDist or (b) results of FreqDist.
# let's filter before calling FreqDist.
# create a set of stopwords (operations with sets are faster than with lists)
stop_set = set(stop_words)
# filter out stopwords (return only words not in the stoplist)
without_stopwords = [word for word in text if word not in stop_set]
# NOTE(review): this previews `text`, not the filtered list --
# `without_stopwords[:7]` was presumably intended; confirm.
text[:7]
# let's also filter out tokens that are not text or numbers
# Python has a built-in method .isalnum() that determines
# if a string only consists of letters or digits:
# https://docs.python.org/3/library/stdtypes.html#str.isalnum
filtered = [word for word in without_stopwords if word.isalnum()]
filtered[:7]
# word frequency
freq = FreqDist(filtered)
freq.most_common(15)
```
### Exploring data: finding interesting words
NLTK also includes a list of common English words. We can use it to find unusual or mis-spelt words in a text corpus.
See also: https://www.nltk.org/book/ch02#code-unusual
```
# NLTK's list of common English words, used as a reference vocabulary.
common_word_list = nltk.corpus.words.words()
# convert word list to a set (+ convert words to lowercase)
word_set = set(word.lower() for word in common_word_list)
# filter out common words
uncommon = [word for word in filtered if word not in word_set]
uncommon[:7]
# word frequency
freq = FreqDist(uncommon)
freq.most_common(15)
```
Note: in order to find really uncommon words we may need to clean data further (convert nouns to singular, etc.) or get a larger list of common words.
---
### Further information
[**Introduction to stylometry with Python**](https://programminghistorian.org/en/lessons/introduction-to-stylometry-with-python) by François Dominic Laramée
* uses FreqDist
Stylometry is the quantitative study of literary style through computational distant reading methods. It is based on the observation that authors tend to write in relatively consistent, recognizable and unique ways.
| github_jupyter |
```
# NOTE(review): this notebook is Python 2 code (bare `print file` below) with
# machine-specific absolute paths; it will not run unmodified under Python 3.
import torch as t
import sys
sys.path.append('../')
from utils import get_score
import glob
import torch as t
import numpy as np
import json
import time
label_path = '/home/a/code/pytorch/zhihu/ddd/labels.json'
test_data_path='/home/a/code/pytorch/zhihu/ddd/test.npz'
# mapping from row index to question id, stored inside the test npz file
index2qid = np.load(test_data_path)['index2qid'].item()
with open(label_path) as f:
labels_info = json.load(f)
qid2label = labels_info['d']
label2qid = labels_info['id2label']
# saved per-model prediction tensors to be ensembled
files = glob.glob('../checkpoints/result/*test.pth')
files
# Sum the saved prediction tensors (excluding MultiModel ones) into r2.
r2=0
for file in files:
if 'MultiModel' not in file:
print file
r2+=t.load(file)
files = glob.glob('../checkpoints/result/tmp/*test*.pth')
wf = [_ for _ in files if 'weight5' not in _]
wf
# Same accumulation for the augmented-result checkpoints.
raug=0
for file in wf:
if 'MultiModel' not in file:
raug+=t.load(file)
r2.size()
# topk(5,1) keeps the 5 highest-scoring label indices per sample.
result=(r2+raug).topk(5,1)[1]
true_labels = [qid2label[index2qid[2999967-200000+ii]] for ii in range(len(r2))]
tmp_files
files = glob.glob('../checkpoints/result/tmp/*test*.pth')
r_aug=0
for file in files:
if 'MultiModel' not in file and 'weight5' not in file:
print file
r_aug+=t.load(file)
files = glob.glob('../checkpoints/result/tmp/*test*.pth')
r_multi=0
for file in files:
if 'MultiModel' in file :
print file
r_multi+=t.load(file)
# Weighted ensemble: MultiModel results get a weight of 5.
result=(r2+r_aug+r_multi*5).topk(5,1)[1]
predict_label_and_marked_label_list = [[_1,_2] for _1,_2 in zip(result,true_labels)]
score,_,_,_ = get_score(predict_label_and_marked_label_list)
score
# Another weighted combination of accumulated result tensors.
rr = r+8.5*r3
result = rr.topk(5,1)[1]
result = rr.topk(5,1)[1]
r=0
weights=[0,1,1,1,0,1,1,1,1]
r3=r
r=0
for a_,w_ in zip(results,weights):
r+=((a_)*w_)
r2=r+results[0]*7 + results[4]*8
result = r.topk(5,1)[1]
# NOTE(review): assigning into range() only works in Python 2, where range
# returns a list; under Python 3 this raises TypeError.
rows = range(result.size(0))
for ii,item in enumerate(result):
rows[ii] = [index2qid[ii]] + [label2qid[str(_)] for _ in item ]
import csv
# Write the submission: question id followed by its top-5 predicted label ids.
with open('result0.43028.csv','w') as f:
writer = csv.writer(f)
writer.writerows(rows)
%history
import csv
# Blank out the predictions of every `stride`-th row (rate = fraction blanked).
line_no=217360
rate = 0.003
with open('../0811_1.csv') as f:
lines = [line.strip().split(',') for line in f.readlines()]
num = rate*line_no
stride = int(1/rate)
for line in lines[::stride]:
line[1:]=[-1,-1,-1,-1,-1]
with open('../0811_1_end.csv','w') as f:
writer = csv.writer(f)
writer.writerows(lines)
lines[0]
0.429477037467622/0.99
def target(args):
"""Objective for weight search: combine tensors a,b,c,d (globals) with the
given weights, score the top-5 predictions, and return the negated score
(so a minimizer maximizes the score)."""
w1,w2,w3 = args
r = a + b*w1 +c*w2 + d*w3
result = r.topk(5,1)[1]
predict_label_and_marked_label_list = [[_1,_2] for _1,_2 in zip(result,true_labels)]
score,_,_,_ = get_score(predict_label_and_marked_label_list)
print (args,score,_)#list_space = [hp.uniform('a',0,1),hp.uniform('b',0,1)]
return -score
d=results[0]
d[-1].sum()
predict_label_and_marked_label_list = [[_1,_2] for _1,_2 in zip(result,true_labels)]
score,_,_,_ = get_score(predict_label_and_marked_label_list)
score
score
#coding:utf8
label_path = '/mnt/7/zhihu/ieee_zhihu_cup/data/labels.json'
test_data_path='/mnt/7/zhihu/ieee_zhihu_cup/data/test.npz'
def ensamble(file1,file2,label_path=label_path, test_data_path=test_data_path,result_csv=None):
"""Ensemble two saved prediction tensors (weighted 9:1) and write the
top-5 labels per question to a CSV (timestamped name by default).
NOTE(review): `rows = range(...)` with item assignment below is Python 2
only; under Python 3 range objects do not support assignment."""
if result_csv is None:
result_csv = time.strftime('%y%m%d_%H%M%S.csv')
a = t.load(file1)
b = t.load(file2)
r = 9.0*a+b
result = r.topk(5,1)[1]
index2qid = np.load(test_data_path)['index2qid'].item()
with open(label_path) as f: label2qid = json.load(f)['id2label']
rows = range(result.size(0))
for ii,item in enumerate(result):
rows[ii] = [index2qid[ii]] + [label2qid[str(_)] for _ in item ]
import csv
with open(result_csv,'w') as f:
writer = csv.writer(f)
writer.writerows(rows)
# expose ensamble() on the command line via python-fire
if __name__ == '__main__':
import fire
fire.Fire()
# Names of the saved per-model prediction tensors (model, score, word/char
# vocabulary, and test/val split encoded in each filename).
files = ['CNNText_tmp0.4024_char_test.pth',
'CNNText_tmp0.4024_char_val.pth',
'DeepText0.4103_word_test.pth',
'Inception0.4110_word.pth',
'LSTMText0.4119_word.pth',
'LSTMText0.4031_char_test.pth',
'LSTMText0.4119_word_test.pth',
'DeepText0.4103_word_val.pth',
'CNNText_tmp0.4109_word_val.pth',
'LSTMText0.4119_word_val.pth',
'RCNN_0.4115_word_test.pth',
'RCNN_0.4037_char_val.pth',
'LSTMText0.4031_char_val.pth',
'RCNN_0.4115_word_val.pth',
'RCNN_0.4037_char_test.pth',
'CNNText_tmp0.4109_word_test.pth']
```
| github_jupyter |
## PyBEAM Tutorial 5: Fixing model parameters.
In this tutorial, we demonstrate how to input a fixed parameter into a model instead of a prior.
First, as before, import PyBEAM's default module.
```
# import PyBEAM's default module
import pybeam.default as pbd
```
For this example, we define a base model with a uniform contamination. This gives us the following model dictionary.
```
# define base model
# Dictionary describing the decision model; passed to all pybeam functions.
model = {'type' : 'base', # model type ('base' or 'ugm')
'sigma' : 1.0, # sets sigma, the scaling parameter
'threshold' : 'fixed', # sets threshold type (fixed, linear, exponential, or weibull)
'leakage' : False, # if True, drift rate has leaky integration
'delay' : False, # if True, decision threshold motion is delayed
'contamination' : True} # if True, uniform contamination added to model
# outputs parameters used by your model
pbd.parse_model(model)
```
We now simulate a data set.
```
# parameters for synthetic data
# Ground-truth parameter values used to generate the mock data set.
phi = {'t_nd' : 0.25, # non-decision time
'w' : 0.5, # relative start point
'mu' : 1.0, # drift rate
'a' : 0.5, # decision threshold location
'g' : 0.05, # contamination strength
'gl' : 0.0, # contamination model lower bound
'gu' : 2.0} # contamination model upper bound
# generate synthetic data
rt = pbd.simulate_model(N_sims = 500, # number of data points to simulate
model = model, # dictionary containing model information
phi = phi) # parameters used to simulate data
# plot data and model likelihood function
pbd.plot_rt(model = model, # dictionary containing model information
phi = phi, # parameters used for model rt distribution
rt = rt); # dictionary of simulated rt data
```
We now prepare to call the parameter inference function. We first build our dictionary of priors. However, let's say that we want to fix 'gl' and 'gu' at single values instead of making them priors?
To accomplish this, instead of writing a prior in the prior dictionary, we simply put the constant we desire as the value. In this case, we want our contamination model to have bounds of 0.0 and 2.0, matching the generated data set; so, for those prior keys, we input those values.
```
# define priors
# 'gl' and 'gu' are given plain numbers instead of prior strings, which
# fixes them at those constants during inference.
p = {'pt_nd' : 'Uniform("t_nd", lower = 0.0, upper = 0.75)', # prior for non-decision time
'pw' : 'Uniform("w", lower = 0.3, upper = 0.7)', # prior for relative start
'pmu' : 'Uniform("mu", lower = -5.0, upper = 5.0)', # prior for drift rate
'pa' : 'Uniform("a", lower = 0.25, upper = 2.0)', # prior for threshold location
'pg' : 'Uniform("g", lower = 0.0, upper = 0.75)', # prior for contamination strength
'gl' : 0.0, # uniform contamination lower bound (fixed)
'gu' : 2.0} # uniform contamination upper bound (fixed)
```
We now proceed as normal to complete parameter inference.
```
# define model condition
# Maps each model parameter to its prior key (or fixed value) and attaches
# the rt data for this condition.
c = {'rt' : rt,
't_nd' : 'pt_nd',
'w' : 'pw',
'mu' : 'pmu',
'a' : 'pa',
'g' : 'pg',
'gl' : 'gl',
'gu' : 'gu'}
# load into condition dictionary
cond = {0 : c}
# run parameter inference
# 3 chains on 3 cores; results are written to the 'tutorial5' file.
trace = pbd.inference(model = model,
priors = p,
conditions = cond,
samples = 25000,
chains = 3,
cores = 3,
file_name = 'tutorial5')
# plot posteriors
# discard the first half of each chain as burn-in
pbd.plot_trace(file_name = 'tutorial5', burnin = 12500);
```
| github_jupyter |
# Voigt profile
```
from exojax.spec import voigt
import jax.numpy as jnp
import matplotlib.pyplot as plt
```
Let's compute the Voigt function $V(\nu, \beta, \gamma_L)$ using exojax!
$V(\nu, \beta, \gamma_L)$ is a convolution of a Gaussian with a STD of $\beta$ and a Lorentian with a gamma parameter of $\gamma_L$.
```
# Evaluate and plot the Voigt profile on a wavenumber grid.
nu=jnp.linspace(-10,10,100)
plt.plot(nu, voigt(nu,1.0,2.0)) #beta=1.0, gamma_L=2.0
```
The function "voigt" is vmapped for nu (input=0), therefore a bit hard to handle when you want to differentiate. Instead, you can use "voigtone", which is not vmapped for all of the input arguments.
```
from exojax.spec import voigtone
from jax import grad, vmap
# voigtone is not vmapped over its arguments, so it can be differentiated
# with grad() and then vmapped over the wavenumber grid.
dvoigt_nu=vmap(grad(voigtone,argnums=0),(0,None,None),0) #derivative by nu
dvoigt_beta=vmap(grad(voigtone,argnums=1),(0,None,None),0) #derivative by beta
plt.plot(nu, voigt(nu,1.0,2.0),label="$V(\\nu,\\beta=1,\\gamma_L=2)$")
plt.plot(nu, dvoigt_nu(nu,1.0,2.0),label="$\\partial_\\nu V(\\nu,\\beta=1,\\gamma_L=2)$")
plt.plot(nu, dvoigt_beta(nu,1.0,2.0),label="$\\partial_\\beta V(\\nu,\\beta=1,\\gamma_L=2)$")
plt.legend()
```
## HMC-NUTS of a simple absorption model
Next, we try to fit a simple absorption model to mock data.
The absorption model is
$ e^{-a V(\nu,\beta,\gamma_L)}$
```
def absmodel(nu, a, beta, gamma_L):
    """Simple absorption model: exp(-a * V(nu, beta, gamma_L)).

    Args:
        nu: wavenumber grid.
        a: absorption strength.
        beta: Gaussian STD of the Voigt profile.
        gamma_L: Lorentzian gamma parameter of the Voigt profile.
    """
    optical_depth = a * voigt(nu, beta, gamma_L)
    return jnp.exp(-optical_depth)
```
Adding a noise...
```
from numpy.random import normal
data=absmodel(nu,2.0,1.0,2.0)+normal(0.0,0.01,len(nu))
plt.plot(nu,data,".")
```
Then, let's perform an HMC-NUTS run.
```
import arviz
import numpyro.distributions as dist
import numpyro
from numpyro.infer import MCMC, NUTS
from numpyro.infer import Predictive
from numpyro.diagnostics import hpdi
def model_c(nu,y):
    """NumPyro probabilistic model for the absorption profile.

    All four parameters (noise scale, absorption strength, Gaussian width,
    Lorentzian width) get Exponential(1.0) priors; the likelihood is a
    Gaussian around the absorption model evaluated on the grid `nu`.
    Pass y=None to draw prior/posterior predictive samples.
    """
    sigma = numpyro.sample('sigma', dist.Exponential(1.0))
    a = numpyro.sample('a', dist.Exponential(1.0))
    beta = numpyro.sample('beta', dist.Exponential(1.0))
    gamma_L = numpyro.sample('gammaL', dist.Exponential(1.0))
    # Deterministic mean spectrum given the sampled parameters.
    mu=absmodel(nu,a,beta,gamma_L)
    numpyro.sample('y', dist.Normal(mu, sigma), obs=y)
from jax import random
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
num_warmup, num_samples = 1000, 2000
kernel = NUTS(model_c,forward_mode_differentiation=True)
mcmc = MCMC(kernel, num_warmup, num_samples)
mcmc.run(rng_key_, nu=nu, y=data)
posterior_sample = mcmc.get_samples()
pred = Predictive(model_c,posterior_sample)
predictions = pred(rng_key_,nu=nu,y=None)
median_mu = jnp.median(predictions["y"],axis=0)
hpdi_mu = hpdi(predictions["y"], 0.9)
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(nu,median_mu,color="C0")
ax.plot(nu,data,"+",color="C1",label="data")
ax.fill_between(nu, hpdi_mu[0], hpdi_mu[1], alpha=0.3, interpolate=True,color="C0",
label="90% area")
plt.xlabel("$\\nu$",fontsize=16)
plt.legend()
```
We got a posterior sampling.
```
refs={};refs["sigma"]=0.01;refs["a"]=2.0;refs["beta"]=1.0;refs["gammaL"]=2.0
arviz.plot_pair(arviz.from_numpyro(mcmc),kind='kde',\
divergences=False,marginals=True,reference_values=refs,\
reference_values_kwargs={'color':"red", "marker":"o", "markersize":12})
plt.show()
```
## curve of growth
As an application, we consider the curve of growth.
The curve of growth is the evolution of the equivalent width as a function of the absorption strength, which here corresponds to $a$. Let's see how the absorption feature grows:
```
nu=jnp.linspace(-100,100,10000)
aarr=jnp.logspace(-3,3,10)
for a in aarr:
plt.plot(nu,absmodel(nu,a,0.1,0.1))
```
Let us define the equivalent width by a simple summation of the absorption.
```
def EW(a):
    """Equivalent width of the absorption for strength `a`.

    Approximated by a plain sum of (1 - transmission) over the module-level
    wavenumber grid `nu`; note the grid spacing is not applied, so the
    result is in grid-point units (fine for studying the power-law slope).
    """
    return jnp.sum(1-absmodel(nu,a,0.1,0.1))
vEW=vmap(EW,0,0)  # vectorized over an array of absorption strengths
```
This is the curve of growth. As you can see, when the absorption is weak, the power of the curve is close to unity (the linear region). But as the absorption strength increases, the power converges to 1/2.
```
aarr=jnp.logspace(-3,3,100)
plt.plot(aarr,vEW(aarr))
plt.yscale("log")
plt.xscale("log")
plt.xlabel("a")
plt.ylabel("equivalent width")
plt.show()
```
Now we have auto-diff for the Voigt. So, we can directly compute the power as a function of $a$.
$power = \frac{\partial}{\partial \log_{10} a } \log_{10} ( EW ) $
```
def logEW(loga):
    """log10 of the equivalent width as a function of log10(a).

    Uses the module-level grid `nu`; differentiating this w.r.t. loga gives
    the local power-law slope of the curve of growth.
    """
    return jnp.log10(jnp.sum(1-absmodel(nu,10**(loga),0.1,0.1)))
dlogEW=grad(logEW)  # d log10(EW) / d log10(a), i.e. the local power
vlogdEW=vmap(dlogEW,0,0)  # vectorized over an array of log10(a) values
logaarr=jnp.linspace(-3,3,100)
plt.plot(10**(logaarr),vlogdEW(logaarr))
plt.axhline(1.0,label="linear limit",color="gray",ls="dashed")
plt.axhline(0.5,label="damped limit",color="gray",ls="dotted")
plt.xscale("log")
plt.xlabel("a")
plt.ylabel("power")
plt.legend()
plt.show()
```
| github_jupyter |
# Western Australia Rental Prices - EDA 12
https://www.kaggle.com/c/deloitte-western-australia-rental-prices/
```
# imports
import csv
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
from __future__ import division
from sklearn import preprocessing
%pylab inline
# settings and constants
%logstop
%logstart -o 'EDA12' rotate
plt.rcParams['figure.figsize'] = (10.0, 8.0)
pd.set_option('display.max_rows', 70)
start_time = pd.datetime.now()
print start_time
train = pd.read_csv('train12.csv')
#train.columns = map(str.lower, train.columns)
train.ren_date_eff_from = pd.to_datetime(train.ren_date_eff_from)
train.set_index('ren_id', inplace=True)
train[:2]
#test = pd.read_csv('data/test.csv', low_memory=False)
train.describe().T
train.sort_values(by='ren_base_rent').ren_base_rent.plot(use_index=False)
min_rent = 0
max_rent = 100
bucket_size = 5
num_buckets = int((max_rent - min_rent)/bucket_size)
bucket_labels = [bucket_size * x for x in range(1,num_buckets+1)]
rent_range = train[(train.ren_base_rent > min_rent) & (train.ren_base_rent < max_rent)].copy()
rent_range.loc[:,'bucket'] = pd.cut(rent_range.ren_base_rent, num_buckets, labels=bucket_labels)
rent_range.groupby('bucket').count().ren_id.plot(kind='bar')
train.house_area.sort_values(ascending=False)[20:].plot(use_index=False)
train.total_area.sort_values(ascending=False)[20:200].plot(use_index=False)
# total area is not useful
train[train.total_area.notnull()].total_area.count()
train.effective_rooms.sort_values(ascending=True)[2000:2050]
pd.set_option('display.max_rows', 50)
train.loc[322529]
train_last_rent = pd.read_csv('train_last_rent.csv')
train_last_rent.set_index('ren_id', inplace=True)
train_last_rent.shape
train.shape
train['last_rent'] = train_last_rent.last_rent
train.to_csv('train12_last_rent.csv')
train_last_rent[:5]
fig = plt.figure()
ax = train.ren_date_eff_from.groupby(train.ren_date_eff_from.dt.year).count().plot(kind='bar',sharex=True)
ax2 = ax.twinx()
ax2.plot(train.ren_base_rent.groupby(train.ren_date_eff_from.dt.year).mean().values, color='red')
# let's cut out the pre 2000 average rent
fig = plt.figure()
ax = train.ren_date_eff_from.groupby(train.ren_date_eff_from.dt.year).count().plot(kind='bar',sharex=True)
ax2 = ax.twinx()
cut_pre_2000 = train[train.ren_date_eff_from > pd.datetime(2000,1,1)]
cut_pre_2000 = cut_pre_2000[cut_pre_2000.ren_date_eff_from < pd.datetime(2015,1,1)]
ax2.plot(range(10,25),
cut_pre_2000.ren_base_rent.groupby(train.ren_date_eff_from.dt.year).mean().values, color='red')
fig = plt.figure()
rent_growth = cut_pre_2000.ren_base_rent.groupby(cut_pre_2000.ren_date_eff_from.dt.year).mean().values
x = np.arange(len(rent_growth))
m, b = np.polyfit(x,rent_growth, 1)
print m, b
plt.plot(x, m*x + b, '-')
plt.xticks(x, np.arange(2001, 2001+len(rent_growth)), rotation='vertical')
plt.plot(x, cut_pre_2000.ren_base_rent.groupby(train.ren_date_eff_from.dt.year).mean().values, color='red')
(29. / (10*12))
100 * 7 * .24713
test.ren_date_eff_from = pd.to_datetime(test.ren_date_eff_from)
test.ren_date_eff_from.groupby(test.ren_date_eff_from.dt.year).count().plot(kind='bar', figsize=(10,7))
m = (29.656 / (10*12))
b = 110
fig = plt.figure()
from_date = pd.datetime(2001,1,1)
to_date = pd.datetime(2008,1,1)
the_delta = to_date - from_date
years = int(the_delta.days / 365)
rent_growth = cut_pre_2000.ren_base_rent.groupby(cut_pre_2000.ren_date_eff_from.dt.year).mean().values
subsample = train[train.ren_date_eff_from > from_date]
subsample = subsample[subsample.ren_date_eff_from < to_date]
subsample.ren_date_eff_from = subsample.ren_date_eff_from + the_delta
subsample.ren_base_rent = (subsample.ren_base_rent * years * m) + b
new_fit = subsample.ren_base_rent.groupby(train.ren_date_eff_from.dt.year).mean().values
x = np.arange(len(new_fit))
print m, b, years
plt.xticks(x, np.arange(2008, 2008+len(rent_growth)), rotation='vertical')
plt.plot(x, rent_growth[len(rent_growth)-len(new_fit):], color='red')
plt.plot(x, new_fit, color='green')
# do we have data on any property rented more than once ? No.
len(train.index) == len(train.index.unique())
len(train.lan_id.unique()) / len(train.index)
len(test.lan_id.unique()) / len(test.index)
len(set(test.lan_id.unique()) & set(train.lan_id.unique()))
test.financial_status.value_counts().sum()
train.carbay_under_cover.value_counts()
# !!!
train.alfresco_room.value_counts()
# !!!
train.big_rooms.value_counts()
train.effective_rooms.value_counts()
# !!!
# combine shed_area_1 and area_of_sheds
train.dining_room.value_counts()
train.toilet.value_counts()
test_orig = pd.read_csv('data/test.csv')
test_orig[:2]
test_orig.REN_DATE_EFF_FROM = pd.to_datetime(test_orig.REN_DATE_EFF_FROM)
test_orig['REN_YEAR'] = test_orig.REN_DATE_EFF_FROM.dt.year
test_orig[:2]
test_orig.REN_YEAR.value_counts()
len(test.index)
train_orig = pd.read_csv('data/train.csv')
train_orig[:2]
train_orig.REN_DATE_EFF_FROM = pd.to_datetime(train_orig.REN_DATE_EFF_FROM)
train_orig['REN_YEAR'] = train_orig.REN_DATE_EFF_FROM.dt.year
train_orig[:2]
train_orig.REN_YEAR.value_counts()
# calc how old rent_year - year_effective_new
train.groupby('rent_year').mean().ren_base_rent.plot()
for y in [1994,1995,1996,1997,1998]:
print y, train[train.rent_year == y].mean().ren_base_rent
train[train.rent_year == 1995].sort_values('ren_base_rent').ren_base_rent.plot(use_index=False)
train[train.rent_year == 1996].sort_values('ren_base_rent').ren_base_rent.plot(use_index=False)
train[train.rent_year == 1997].sort_values('ren_base_rent').ren_base_rent.plot(use_index=False)
```
# LAND
```
# land table columns
landc = ['lan_multiple_zoning_flag',
'lan_survey_strata_ind',
'lan_srd_taxable',
'lan_id_type',
'lan_power',
'lan_water',
'lan_gas',
'lan_drainage',
'lan_date_subdivision_mfp',
'lan_lst_code',
'lan_lds_nubmer',
'lan_lds_number_id_type3',
'lan_lds_number_is_rural',
'lan_house_no',
'lan_house_no_sfx',
'lan_address_situation',
'lan_lot_no',
'lan_unit_no',
'lan_date_redundant_eff',
'lan_date_subdivision_lga',
'lan_date_subdivision_wapc',
'lan_reserve_class',
'lan_sketch_id',
'lan_location',
'lan_urban_map_grid',
'lan_id1_survey_no',
'lan_id1_alpha_lot',
'lan_id1_lot_no',
'lan_id1_part_lot',
'lan_id1_lease_part',
'lan_id1_section',
'lan_id1_type',
'lan_id1_town_lot',
'lan_id1_town_lot_type',
'lan_id2_lot',
'lan_id2_part_lot',
'lan_id2_lease_part',
'lan_id2_type',
'lan_id2_alpha_prefix',
'lan_id2_alpha_suffix',
'lan_id3_type',
'lan_id3_lease_reserve_no',
'lan_id3_part_lot',
'lan_id3_lease_part',
'lan_date_survey_strata',
'lan_part_lot_source',
'lan_date_lease_expiry',
'lan_date_lease_from',
'lan_str_id',
'lan_str_id_has_corner',
'llg_date_eff_from',
'lds_name',
'lds_code',
'lds_status',
'str_name',
'str_sty_code',
'corner_str_name',
'corner_str_status',
'corner_str_sty_code',
'sub_name',
'sub_postcode',
'urt_date_eff_from',
'urt_urban_rural_ind']
import sqlalchemy as sqla
engine = sqla.create_engine('postgresql://paulperry:ciao,ciao@localhost:5432/australia',
connect_args={'client_encoding': 'latin1'})
metadata = sqla.MetaData()
land_col_counts = []
for s in landc:
qland = 'select count(distinct '+s+') from land;'
land_col_counts.append([s,pd.read_sql_query(qland, engine)])
break
land_col_counts
qz = 'select area_albers_sqm, count(t4.ve_number) as cnt \
from demographics t1 \
right join demographics_key t2 on (t1.sa1_7 = t2.sa1_7)\
right join land_valuation_key t3 on (t2.lan_id = t3.lan_id) \
right join train t4 on (t3.ve_number = t4.ve_number) \
group by area_albers_sqm;'
area_albers_sqm = pd.read_sql_query(qz, engine)
# area_albers_sqm.sort_values(by='count', ascending=False)
print len(area_albers_sqm.index)
area_albers_sqm.cnt.sum()
qz = 'select sub_postcode, count(t3.ve_number) as cnt \
from land t1 \
right join land_valuation_key t2 on (t1.lan_id = t2.lan_id) \
right join train t3 on (t2.ve_number = t3.ve_number) \
group by sub_postcode;'
sub_postcode = pd.read_sql_query(qz, engine)
print len(sub_postcode)
sub_postcode.cnt.sum()
sub_postcode.sort_values(by='cnt', ascending=True)[:20]
```
| github_jupyter |
```
from queue import Queue
import numpy as np
import math
class SystolicArrayCell:
    """One processing element (PE) of a weight-stationary systolic array.

    Each cell multiplies its stationary weight by the activation arriving
    from the left, adds the partial sum arriving from above, and forwards
    both values to its neighbors.  A vertical "ring" register chain plus a
    per-row result bank extend the classic array for edge-update (graph)
    computations.

    Fix over the original: integer comparisons used `is` (identity), which
    only works by accident of CPython's small-int caching and raises a
    SyntaxWarning on Python 3.8+; they are now `==`.
    """

    def __init__(self):
        self.pos_x = 0
        self.pos_y = 0
        self.array_size = 0
        # Each cell has the concept of a "partial sum" and an "activation".
        # These take one cycle to cross each cell (they would be delayed
        # with a register). To model this in python, we'll have a <field>
        # variable that represents the value driven by the neighboring cell,
        # and a <field>_out value representing the value driven by this cell.
        # partial sum: the running sum of the products, transmitted vertically
        self.partial_sum = 0
        self.partial_sum_out = 0
        # activation: the input activation value, transmitted horizontally
        self.activation = 0
        self.activation_out = 0
        # weight: the stationary second factor of the multiplication
        self.weight = 0
        # Input fields, which will hold the connection to the cells or FIFOs
        # above and to the left of this cell
        self.input_activation = None
        self.input_partial_sum = None
        # ring registers
        self.receive_cell = None  # receive-data register source (cell below, wrapping to row 0)
        self.receive_reg = 0
        self.receive_out = 0
        # self.send = ...  # send-data register (unused in this model)
        # On-chip buffer: the result-bank slot list shared by this PE's row
        self.result_bank_input = None
        # edge-update state
        self.process_id = 0
        self.process_id_out = 0
        self.src = 0
        self.dst = 0
        self.rb_depth = 0
        self.rb_value = 0
        self.edge_compute = True
        self.edge_number = 0

    # In the hardware implementation, we would use a control flow signal and
    # weight inputs via the partial sum lines (note that a weight is only half
    # the bits of that field, allowing control flow to be transmitted
    # alongside). For simplification here, we'll just say it's hacked in by
    # magic.
    def set_weight(self, weight):
        """Load the stationary weight for this PE."""
        self.weight = weight

    # Connects this cell to its neighbors above and to the left
    def connect(self, pos_x, pos_y, array):
        """Wire this cell into `array` at grid position (pos_x, pos_y)."""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.array_size = array.array_size
        self.edge_number = pos_y
        # If we're at x position zero, then our left neighbor is a FIFO queue
        if self.pos_x == 0:
            self.input_activation = array.input[self.pos_y]
        # Otherwise, it's another cell
        else:
            self.input_activation = array.cells[self.pos_y][self.pos_x - 1]
        # If we're at y position zero, then our above neighbor is nothing
        if self.pos_y == 0:
            # All partial sums from here will just be 0
            self.input_partial_sum = None
        # Otherwise, our above neighbor is another cell
        else:
            self.input_partial_sum = array.cells[self.pos_y - 1][self.pos_x]
        # ring dataflow: the bottom row wraps around to row 0
        if self.pos_y == array.array_size - 1:
            # cells is indexed [row (y)][column (x)]
            self.receive_cell = array.cells[0][self.pos_x]
        # Otherwise, it's the cell directly below
        else:
            self.receive_cell = array.cells[self.pos_y + 1][self.pos_x]
        # each PE on the same row connects to the same result bank
        self.result_bank_input = array.result_bank[self.pos_y][self.pos_x]
        self.edge_bank = array.edge_bank[self.pos_x]

    def set_process_id(self, idx):
        """Assign the node/process id this PE currently represents."""
        self.process_id = idx

    # We'll model the transfer of signals through registers with a read() and a
    # compute() method.
    # read() represents the registers sampling data at the positive edge of the
    # clock
    def read(self, edge_update):
        """Sample all inputs (left/above neighbors, ring, edge bank)."""
        # Read the left neighbor
        # If this is a FIFO queue, take its value (or 0 if it's empty)
        if type(self.input_activation) is Queue:
            if self.input_activation.empty():
                self.activation = 0
            else:
                self.activation = self.input_activation.get()
        # If it is a cell, we read the value from activation_out
        else:
            self.activation = self.input_activation.activation_out
        # Read the above neighbor
        # If this is not connected, then the partial sum is always 0
        if self.input_partial_sum is None:
            self.partial_sum = 0
        # Otherwise, read the partial sum from the above cell
        else:
            self.partial_sum = self.input_partial_sum.partial_sum_out
        # ring dataflow: pop the next (src, dst) edge on the initial update
        # cycle, or once the current edge's source matches our process id
        if self.edge_bank.empty():
            self.edge_compute = False
        elif edge_update:
            self.edge_compute = True
            self.src, self.dst = self.edge_bank.get()
        elif self.src == self.process_id:
            self.edge_compute = True
            self.src, self.dst = self.edge_bank.get()
        else:
            self.edge_compute = False
            self.src = self.src
            self.dst = self.dst
        if edge_update:
            # Initial cycle: seed the ring with this cell's own row index.
            self.receive_reg = self.edge_number
            self.process_id = self.process_id
        else:
            # Steady state: shift values along the vertical ring.
            self.receive_reg = self.receive_cell.receive_out
            self.process_id = self.receive_cell.process_id_out
        # Destination node's slot within the per-PE result bank.
        self.rb_depth = int(self.dst / self.array_size)
        print("cell({:d},{:d}) src {:d}, dst {:d}, process_id {:d}". format(self.pos_x, self.pos_y, self.src, self.dst, self.process_id))
        self.rb_value = self.result_bank_input[self.rb_depth]

    # compute() represents combinational logic that takes place between
    # positive edges of the clock (multiplication and addition)
    def compute(self):
        """Combinational stage: MAC plus ring/result-bank update."""
        # First, the weight and activation in are multiplied
        product = self.weight * self.activation
        # Then that value is added to the partial sum from above and
        # transmitted downwards
        self.partial_sum_out = self.partial_sum + product
        # And the activation is transmitted to the right
        self.activation_out = self.activation
        # ring dataflow: accumulate the received value into the result bank
        if self.edge_compute:
            self.result_bank_input[self.rb_depth] = self.rb_value + self.receive_reg
        self.receive_out = self.receive_reg
        self.process_id_out = self.process_id
        # print(self.edge_number)

    def cell_state(self):
        """Print a one-line debug summary of this cell's registers."""
        # print("cell({:d},{:d}),rec_reg={:d}, rec_out={:d}, proc_id={:d}, proc_out={:d}". format(self.pos_x, self.pos_y, self.receive_reg, self.receive_out, self.process_id, self.process_id_out))
        print("cell({:d},{:d}),rec_reg={:d}, proc_id={:d}, rb_value={:d}". format(self.pos_x, self.pos_y, self.receive_reg, self.process_id, self.rb_value))
# This represents our entire array: cells, inputs, and outputs
class SystolicArray:
# We'll take a parameter for the size of the square arrays to be multiplied
def __init__(self, array_size):
self.array_size = array_size
# "cells" will hold the array of processing elements
self.cells = []
# This array is a square with dimensions "array_size"
for _ in range(self.array_size):
row = []
for _ in range(self.array_size):
cell = SystolicArrayCell()
row.append(cell)
self.cells.append(row)
# The inputs and outputs will both be FIFO queues
self.input = [Queue() for _ in range(self.array_size)]
self.output = [Queue() for _ in range(self.array_size)]
self.edge_bank = [Queue() for _ in range(self.array_size)]
self.result_bank = [[list() for _ in range(array_size)] for _ in range(array_size)]
# When all cells and inputs are created, then they can be connected
# (again, this would be accomplished with wiring)
for row_num, row in enumerate(self.cells):
for col_num, cell in enumerate(row):
cell.connect(col_num, row_num, self) #每一行对应一个pos_y, 每一列对应一个pos_x
# Accept a 2d array of weights, and "hack" them in. The hardware way to
# fill weights is interesting but outside the scope of this demo.
def fill_weights(self, weights):
for row_num, row in enumerate(weights):
for col_num, weight in enumerate(row):
self.cells[row_num][col_num].set_weight(weight)
# Accept a 2d array of activations.
def fill_activations(self, activations):
# For the systolic array to function properly, the activations must be
# padded with a triangle of zeroes
for row_num in range(self.array_size):
for _ in range(row_num):
self.input[row_num].put(0)
# And the activations must be transposed before being added to the
# input queue
for row_num in range(self.array_size):
col = [activations[x][row_num] for x in range(self.array_size)]
for activation in col:
self.input[row_num].put(activation)
def fill_result(self, result):
for row_num in range(self.array_size):
for i in range(self.array_size):
self.result_bank[row_num].append(i)
#ring dataflow
def fill_edges(self, edges):
for row_num in range(self.array_size):
for idx_ in range(len(edges[row_num])):
print(edges[row_num][idx_])
self.edge_bank[row_num].put(edges[row_num][idx_])
def fill_result_banks(self, num_nodes):
for row_num in range(self.array_size):
for idx_ in range(self.array_size):
for _ in range(math.ceil(num_nodes/self.array_size)):
self.result_bank[row_num][idx_].append(0)
def fill_idx(self,idx):
for col_num in range(self.array_size):
for row_num in range(self.array_size):
self.cells[col_num][row_num].set_process_id(idx[col_num])
# For this demo, all cells will read() the values of their neighbors first
def read(self,edge_update):
for row in self.cells:
for cell in row:
cell.read(edge_update)
# And then after all cells have read(), they will compute() the next step
def compute(self):
for row in self.cells:
for cell in row:
cell.compute()
# After each step of compute(), new outputs will be but onto the output
# queue
for col_num in range(self.array_size):
self.output[col_num].put(self.cells[-1][col_num].partial_sum_out)
def show_staus(self):
for row in self.cells:
for cell in row:
cell.cell_state()
# Each cycle involves a read() and a compute()
def cycle(self, edge_update):
# read() models register sampling on the positive edge of the clock
self.read(edge_update)
# compute() models the combinational logic between clock edges
self.compute()
self.show_staus()
# run() will execute the array's computation, assuming it's been filled
def run(self, num_nodes):
# It takes 3n-2 cycles to compute the full matrix of results
edge_update = True
for cycle in range(3*self.array_size - 2):
print("-----Cycle----{:d}----------". format(cycle))
self.cycle(edge_update)
edge_update = False
self.get_edge_output(num_nodes)
return self.get_outputs()
# The outputs are also staggered and transposed, so we'll format them
# before returning the results
def get_outputs(self):
ret = []
# Remove the staggering by throwing away the appropriate number of 0's
for col_num in range(self.array_size):
for _ in range(col_num + self.array_size - 1):
self.output[col_num].get()
# And transpose the results
for row_num in range(self.array_size):
row = []
for output_col in self.output:
row.append(output_col.get())
ret.append(row)
return ret
def get_edge_output(self, num_nodes):
for id_x in range(num_nodes):
print("id={:d}-|-{:d}". format(id_x, self.result_bank[id_x%self.array_size][0][0]))
# Here we'll use a small 3x3 test multiplication to see the systolic array
# in action
array_size = 3
myArray = SystolicArray(3)
activations = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
myArray.fill_activations(activations)
weights = [
[10, 20, 30],
[40, 50, 60],
[70, 80, 90]
]
myArray.fill_weights(weights)
myArray.fill_result(weights)
src = [0,1,2,1,2,0,2,0,1]
dst = [0,1,2,0,1,2,0,1,2]
edge_banks = []
for _ in range(array_size):
edge_banks.append(list(zip(src,dst)))
myArray.fill_edges(edge_banks)
idx = [0,1,2]
myArray.fill_idx(idx)
myArray.fill_result_banks(9)
res = myArray.run(3)
assert (res == np.matmul(activations, weights)).all()
print('Systolic array matches numpy matmul')
src = [1,2,3]
dst = [1,2,3]
B = []
A = list(zip(src,dst))
B.append(A)
B
src
dst = 2
row = int(dst%3)
row
int(dst%3)
```
| github_jupyter |
## produce label-noised training sets
```
# OPTIONAL: Load the "autoreload" extension so that code can change
%load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
%autoreload 2
import pandas as pd
import numpy as np
np.random.seed(42)
import random
random.seed(42)
import os
import sys
def noise_training_set(file, noise_per_step_positives, noise_per_step_negatives, pos_steps, neg_steps):
    """Write a copy of a training set with a fraction of its labels flipped.

    Reads a gzipped JSON-lines training set, flips the label of
    `pos_steps * noise_per_step_positives` of the positives (1 -> 0) and
    `neg_steps * noise_per_step_negatives` of the negatives (0 -> 1), and
    writes the noised set to data/interim/wdc-lspc/training-sets-noised/
    with the noise fractions encoded in the file name.

    Note: each step's sample size is computed from the *original* class
    counts, so k steps flip k equal-sized disjoint batches.
    """
    train_df = pd.read_json(file, compression='gzip', lines=True)
    positives_df = train_df[train_df['label'] == 1].copy()
    negatives_df = train_df[train_df['label'] == 0].copy()
    # Per-step sample sizes, fixed relative to the full class sizes.
    pos_sample_amount = int(len(positives_df)*noise_per_step_positives)
    neg_sample_amount = int(len(negatives_df)*noise_per_step_negatives)
    pos_noise_index = []
    neg_noise_index = []
    # Each step samples from the not-yet-selected remainder, so the
    # selected indices are disjoint across steps.
    for step in range(0,pos_steps):
        positives_sample_df = positives_df.sample(n=pos_sample_amount, random_state=42)
        pos_selected_index = list(positives_sample_df.index)
        pos_noise_index.extend(pos_selected_index)
        positives_df.drop(pos_selected_index, inplace=True)
    for step in range(0,neg_steps):
        negatives_sample_df = negatives_df.sample(n=neg_sample_amount, random_state=42)
        neg_selected_index = list(negatives_sample_df.index)
        neg_noise_index.extend(neg_selected_index)
        negatives_df.drop(neg_selected_index, inplace=True)
    # Flip the selected labels in the full frame.
    train_df.loc[pos_noise_index, 'label'] = 0
    train_df.loc[neg_noise_index, 'label'] = 1
    file_name = os.path.basename(file)
    new_file_name = file_name.replace('.json.gz', '_{:0.2f}_posnoise_{:0.2f}_negnoise.json.gz'.format(pos_steps*noise_per_step_positives, neg_steps*noise_per_step_negatives))
    out_path = '../../../data/interim/wdc-lspc/training-sets-noised/'
    os.makedirs(out_path, exist_ok=True)
    train_df.to_json(out_path+new_file_name, compression='gzip', lines=True, orient='records')
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.01, 0.01, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.02, 0.02, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.03, 0.03, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.04, 0.04, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.05, 0.05, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.1, 0.1, 1, 1)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.1, 0.1, 2, 2)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.1, 0.1, 3, 3)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.1, 0.1, 4, 4)
noise_training_set('../../../data/raw/wdc-lspc/training-sets/computers_train_xlarge.json.gz', 0.1, 0.1, 5, 5)
```
| github_jupyter |
# Plan observable transits tonight:
```
%matplotlib inline
from astroplan import (FixedTarget, Observer, is_event_observable,
AltitudeConstraint, AtNightConstraint,
MoonSeparationConstraint)
from astroplan.periodic import EclipsingSystem
import astropy.units as u
from astropy.time import Time
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
```
Download a table of known transiting exoplanets:
Create an astroplan [`EclipsingSystem`](https://astroplan.readthedocs.io/en/latest/api/astroplan.EclipsingSystem.html) object for each transiting exoplanet:
```
from astroquery.nasa_exoplanet_archive import NasaExoplanetArchive
from astropy.table import Column
nasa_exoplanet_table = NasaExoplanetArchive.get_confirmed_planets_table(cache=False, all_columns=True)
nasa_transit_table = nasa_exoplanet_table[nasa_exoplanet_table['pl_discmethod'] == 'Transit']
nasa_transit_table.add_index('pl_name')
from astropy.constants import G, M_sun, R_sun
inc = nasa_transit_table['pl_orbincl']
inc[inc == 0*u.deg] = 90*u.deg
# a = nasa_transit_table['pl_orbsmax']
period = nasa_transit_table['pl_orbper']
# Kepler's law
mstar = nasa_transit_table['st_mass']
mstar[mstar == 0*M_sun] = 1 * M_sun
a = (( G*mstar / (4*np.pi**2) * period**2 )**(1/3) ).decompose()
rp = nasa_transit_table['pl_radj']
rstar = nasa_transit_table['st_rad']
rstar[rstar == 0*R_sun] = 1*R_sun
ecc = nasa_transit_table['pl_orbeccen']
arstar = (a/rstar).decompose()
arstar[arstar == 0] = 10
b = arstar * np.cos(inc)
#rprstar = nasa_transit_table['pl_ratror']
rprstar = (rp/rstar).decompose()
ecc = nasa_transit_table['pl_orbeccen'].filled(0)
omega = np.radians(nasa_transit_table['pl_orblper'].filled(90))
durations = period/np.pi * np.arcsin( np.sqrt((1 + rprstar)**2 - b**2) / np.sin(inc) / arstar).value * np.sqrt(1 - ecc**2) / (1 + ecc*np.sin(omega))
durations[np.isnan(durations)] = 0*u.day
from astropy.constants import R_jup, R_sun
epochs = nasa_transit_table['pl_tranmid']
names = nasa_transit_table['pl_name']
simbad_names = nasa_transit_table['pl_name']
radius_ratio = (nasa_transit_table['pl_radj'] / nasa_transit_table['st_rad']).decompose().value
sky_coords = nasa_transit_table['sky_coord']
eclipsing_systems = []
transit_depths = []
coords = []
# Build an EclipsingSystem for every planet whose period, epoch and duration
# are all known (masked table entries are skipped).
# Fix: the original zipped over `periods`, which is never defined in this
# notebook — the orbital-period column was bound to `period` above.
for p, t0, dur, name, rprs, coord in zip(period, epochs, durations, names, radius_ratio, sky_coords):
    if not any(isinstance(i, np.ma.core.MaskedConstant) for i in (p, t0, dur)):
        eclipsing_sys = EclipsingSystem(Time(t0, format='jd'), p, duration=dur, name=name)
        eclipsing_systems.append(eclipsing_sys)
        transit_depths.append(rprs**2)
        coords.append(coord)
```
Initialize the observatory, and say we're observing tonight:
```
obs = Observer.at_site('KPNO')
start, end = obs.tonight(Time('2018-12-28'), horizon=-12*u.deg)
```
Only compute observability for transits of exoplanets with $R_p/R_\star >$ `minimum_radius_ratio` :
```
minimum_depth = 0 # 0.0011
n_days = 1
altitude_limit = 15 * u.deg
transit_events = []
labels = 'Name, Depth, Ingress, Egress, Alt1, Alt2'.split(', ')
for day in range(n_days):
transits_day = []
for es, coord, depths in zip(eclipsing_systems, coords, transit_depths):
next_transit = es.next_primary_ingress_egress_time(start, n_eclipses=1)
ingress = next_transit[0, 0]
egress = next_transit[0, 1]
if egress < end+day*u.day and ingress > start+day*u.day:
ingress_alt = obs.altaz(ingress, coord).alt
egress_alt = obs.altaz(egress, coord).alt
observable = (ingress_alt > altitude_limit) and (egress_alt > altitude_limit)
if observable:
transits_day.append([es.name, round(depths, 4), next_transit[0, 0].iso, next_transit[0, 1].iso,
round(ingress_alt.degree, 0),
round(egress_alt.degree, 0)])
if len(transits_day) > 0:
transit_events.append(Table(rows=transits_day, names=labels))
else:
transit_events.append(None)
for i, table in enumerate(transit_events):
print('\n\nTransits on {0} UTC:'.format((start+i*u.day).datetime.date()))
if table is not None:
table.sort('Depth')
table.pprint(max_width=1000, max_lines=1000)
else:
print('None')
```
| github_jupyter |
```
"""Training script smpl deformation space experiment.
"""
import argparse
import json
import os
import glob
import numpy as np
from collections import defaultdict
import warnings
import time
import trimesh
from utils import render
import torch
import tqdm
from shapeflow.layers.deformation_layer import NeuralFlowDeformer
from shapeflow.layers.chamfer_layer import ChamferDistKDTree
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
# functions
def trace_pts(v_seq, idxs=None):
    """Create tracer line meshes by following selected points through time.

    Args:
        v_seq: [nsteps, #v, 3] array of vertex positions per time step.
        idxs: indices of the vertices to trace; defaults to [0].

    Returns:
        A line mesh (via utils.render.line_meshes) connecting each traced
        point's position at step t to its position at step t+1.
    """
    # Avoid the mutable-default-argument pitfall; default to tracing vertex 0.
    if idxs is None:
        idxs = [0]
    n_steps = v_seq.shape[0]
    n_traced = len(idxs)
    idxs = np.array(idxs)
    v_traced = v_seq[:, idxs, :]
    verts = v_traced.reshape(-1, 3)
    # Edge (t*nt + k) -> ((t+1)*nt + k): same traced point, consecutive steps.
    sid = np.arange(n_steps * n_traced).reshape([n_steps, n_traced])[:-1].reshape(-1)
    eid = np.arange(n_steps * n_traced).reshape([n_steps, n_traced])[1:].reshape(-1)
    edges = np.stack([sid, eid], axis=1)
    lines = render.line_meshes(verts, edges)
    return lines
def edge_lengths(verts, edges):
    """Return the Euclidean length of every edge.

    Args:
        verts: [..., #v, 3] vertex positions.
        edges: [#e, 2] tensor of vertex-index pairs.

    Returns:
        [..., #e] tensor of per-edge lengths.
    """
    endpoints_a = verts[..., edges[:, 0], :]
    endpoints_b = verts[..., edges[:, 1], :]
    return (endpoints_b - endpoints_a).norm(dim=-1)
def spmatmul(den, sp):
    """Apply a sparse matrix to the vertex dimension of a dense tensor.

    Args:
        den: dense tensor of shape batch_size x in_chan x #V.
        sp: sparse tensor of shape new_len x #V.

    Returns:
        Dense tensor of shape batch_size x in_chan x new_len.
    """
    batch_size, in_chan, nv = den.size()
    new_len = sp.size(0)
    # Fold batch and channel dims together so one spmm covers them all.
    flat = den.permute(2, 1, 0).contiguous().view(nv, -1)
    mixed = torch.spmm(sp, flat)
    return mixed.view(new_len, in_chan, batch_size).contiguous().permute(2, 1, 0)
```
# Load meshes
```
# Load the SMPL animation sequence; all frames share one mesh topology.
data_dir = "data/smpl_animation"
device = torch.device("cuda:0")
d = np.load(os.path.join(data_dir, "meshes.npz"))
faces = d['faces']
batch_verts = d['batch_verts']
n_shapes = len(batch_verts)
# Scale factor 5 presumably puts vertices in the deformer's working range — TODO confirm.
verts_t = torch.from_numpy(batch_verts).float().to(device) * 5
# edges
# Connectivity is the same for every frame, so derive edges from frame 0 only.
m = trimesh.Trimesh(batch_verts[0], faces, process=False)
edges = m.edges
edges_t = torch.from_numpy(edges).to(device)
```
# \[Experiment] Deform between pairs
[!] Run the different scenarios presented in the paper by switching between case names (as in Main Options).
Note that the divergence-free versions are relatively slower due to additional computational overhead.
## Main Options
```
# Select which experiment variant to run; must be one of CASES below.
case = "shapeflow_divfree_edge2" # choice of one of below
CASES = ["shapeflow_edge0", "shapeflow_divfree_edge0", "shapeflow_divfree_edge2"]
# NOTE: divfree versions might be slow...
```
## Additional Options
```
# options
# Hyper-parameters shared by every experiment variant.
deformer_nf = 16 # number of base feature layers in deformer
# ODE solver absolute / relative tolerances.
atol = 1e-2
rtol = 1e-2
nonlin = 'elu'
adjoint = False
solver = 'rk4'
niter = 1000 # number of iterations
optimizer = torch.optim.Adam
learning_rate = 2e-3
criterion = torch.nn.L1Loss() # torch.nn.MSELoss()
# Print losses every this many iterations.
log_per_n_iter = 20
nsteps = 5 # number of integration steps. the higher this value, the more accurate the ODE integration is.
```
## Setup
```
# parse options
# combinations of parameters
OPTIONS_0 = {
"name": CASES[0],
"divfree": False,
"alpha_edge": 0,
}
OPTIONS_1 = {
"name": CASES[1],
"divfree": True,
"alpha_edge": 0,
}
OPTIONS_2 = {
"name": CASES[2],
"divfree": True,
"alpha_edge": 2,
}
# Map each case name to its option dict, then pick the active variant.
OPTIONS = {CASES[0]: OPTIONS_0, CASES[1]: OPTIONS_1, CASES[2]: OPTIONS_2}
option = OPTIONS[case]
divfree = option["divfree"] # use divergence free parameterization
alpha_edge = option["alpha_edge"]
# setup model
deformer = NeuralFlowDeformer(latent_size=1, f_width=deformer_nf, s_nlayers=2,
s_width=5, method=solver, nonlinearity=nonlin, arch='imnet',
adjoint=adjoint, rtol=rtol, atol=atol, via_hub=False,
no_sign_net=False, return_waypoints=True, use_latent_waypoints=True,
divfree=divfree)
# for the linear case, we do not require the lat_params to be trainable.
lat_params = torch.linspace(-1, 1, nsteps)[:, None] # lat_params = torch.linspace(-1, 1, n_shapes)[:, None]
lat_params = lat_params.unsqueeze(0).to(device)
deformer.to(device)
optim = optimizer(deformer.parameters(), lr=learning_rate)
# Index order that reverses the latent waypoint sequence, used to evaluate the
# deformation in the opposite (target-to-source) direction.
rev_seq = np.arange(lat_params.shape[1]-1, -1, -1)
```
## Training (Fitting)
```
# training loop:
# Source: first animation frame. Target: a near-last frame shifted along -x.
v_src = verts_t[0:1] # [1, nverts, 3]
v_tar = verts_t[-10:-9] + torch.tensor([-.5, 0., 0.]).to(device) # [1, nverts, 3]
v_src_tar = torch.cat([v_src, v_tar], dim=0)
v_tar_src = torch.cat([v_tar, v_src], dim=0)
l_src_tar = torch.cat([lat_params, lat_params[:, rev_seq]], dim=0)
e_src = edge_lengths(v_src, edges_t)
# Linear blend between source and target; serves as the fitting reference.
a = torch.linspace(0, 1, nsteps).to(device)[:, None, None]
v_lin = (1-a) * v_src + a * v_tar
print(f"Optimizing for {niter} iterations...")
for it in range(niter):
optim.zero_grad()
v_prd_seq = deformer(v_src_tar, l_src_tar) # [nsteps, 2, nverts, 3]
# Symmetrize: average the forward pass with the reversed backward pass.
v_prd_mean = 0.5*(v_prd_seq[:, 0] + v_prd_seq[rev_seq, 1])
v_p = v_prd_seq.reshape(-1, v_prd_seq.shape[-2], 3)
e_prd = edge_lengths(v_p, edges_t)
loss_fit = criterion(v_prd_mean, v_lin)
# Penalize deviation of predicted edge lengths from the source mesh (rigidity prior).
loss_edge = criterion(e_prd-e_src, torch.zeros_like(e_prd))
loss = loss_fit + loss_edge * alpha_edge
loss.backward()
optim.step()
if it % log_per_n_iter == 0:
print(f"Iter {it:4d}: loss {loss.item():.3e}, loss_fit {loss_fit.item():.3e}, "
f"loss_edge {loss_edge.item():.3e}")
```
## Visualize deformation results
```
# evaluate
# Denser latent sampling (neval steps) gives a smooth visualized sequence.
neval = 21
subsamp = 4
lat_params_ = torch.linspace(-1, 1, neval)[None, :, None].to(device)
rev_seq_ = np.arange(lat_params_.shape[1]-1, -1, -1)
v_src_tar_ = torch.cat([v_src, v_tar], dim=0)
l_src_tar_ = torch.cat([lat_params_, lat_params_[:, rev_seq_]], dim=0)
with torch.no_grad():
v_prd_seq_ = deformer(v_src_tar_, l_src_tar_) # [nsteps, nverts, 3]
v_prd = v_prd_seq_.detach().cpu().numpy()
# Symmetrized prediction: average forward pass with reversed backward pass.
v_prd_mean = 0.5*(v_prd[:, 0] + v_prd[rev_seq_, 1])
meshes = [trimesh.Trimesh(v_prd_mean[i], faces) for i in range(v_prd.shape[0])]
mesh0 = trimesh.Trimesh(v_prd[0, 0], faces)
mesh1 = trimesh.Trimesh(v_prd[0, 1], faces)
color0 = np.array([1., .7, .4, .6])
color1 = np.array([.7, 1., .4, .6])
# Blend each mesh's vertex color from color0 to color1 across the sequence.
for i, m in enumerate(meshes):
a = i * 1 / (len(meshes) - 1)
c = a * color1 + (1-a) * color0
m.visual.vertex_colors = c
picks = np.genfromtxt("verts_trace.txt").astype(int)
# lines = trace_pts(v_prd_mean, idxs=picks)#[2545, 6000, 4165+1]
pts = v_prd_mean[::subsamp, picks, :].reshape([-1, 3])
pts = trimesh.PointCloud(pts, colors=np.array([1., 0., 0.]))
# Rendering view point
eye = [.2, 0., +1.]
zoom = 0.6
d_eye = np.linalg.norm(eye)
center = 0.5*np.sum(0.5*(mesh0.bounding_box.bounds + mesh1.bounding_box.bounds), 0)
eye = np.array(eye) / d_eye * zoom + center
up = [0, 1, 0]
# img_all, _, _, _ = render.render_trimesh(meshes[::subsamp][1:-1] + [mesh0, mesh1, lines, pts], eye, center, up, res=(2048*1.5, 2048), light_intensity=2, point_size=3)
img_all, _, _, _ = render.render_trimesh(meshes[::subsamp][1:-1] + [mesh0, mesh1, pts], eye, center, up, res=(2048*1.5, 2048), light_intensity=2, point_size=3)
name = option["name"]
subdir = "anim_debug"
# Export every interpolated mesh for the paper figures.
os.makedirs(f'paper_figures/{subdir}/{name}', exist_ok=True)
for i, m in enumerate(meshes):
_ = m.export(os.path.join(f'paper_figures/{subdir}/{name}', f'mesh_{i}.ply'))
print(f"Animation sequence visualization; Case: {name}")
plt.figure(figsize=(24, 12))
plt.imshow(img_all)
plt.axis('off')
# NOTE(review): calling plt.show() before plt.savefig() can leave savefig with
# an empty figure in inline backends — confirm the saved PNG is not blank.
plt.show()
plt.savefig(f'paper_figures/{subdir}/{name}.png')
plt.show()
# save volume curve
print(f"Volume change through deformation; Case: {name}")
plt.figure()
alpha = np.linspace(0, 1, len(meshes))
rel_vol = np.array([m.volume/meshes[0].volume for m in meshes])
np.savez(f'paper_figures/{subdir}/{name}.npz', alpha=alpha, rel_vol=rel_vol)
plt.plot(alpha, rel_vol)
plt.ylim(.7, 1.1)
plt.show()
```
# Linear Interpolation
```
# Linear-interpolation baseline: blend raw vertex positions directly between
# the source and target meshes (no learned deformation).
v0 = v_src[0].detach().cpu().numpy()
v1 = v_tar[0].detach().cpu().numpy()
alpha_ = alpha[:, None, None]
v_prd_mean = (1-alpha_) * v0 + alpha_ * v1
meshes = [trimesh.Trimesh(v_prd_mean[i], faces) for i in range(v_prd.shape[0])]
mesh0 = trimesh.Trimesh(v_prd[0, 0], faces)
mesh1 = trimesh.Trimesh(v_prd[0, 1], faces)
color0 = np.array([1., .7, .4, .6])
color1 = np.array([.7, 1., .4, .6])
# Blend vertex colors across the sequence, as in the learned-deformation cell.
for i, m in enumerate(meshes):
a = i * 1 / (len(meshes) - 1)
c = a * color1 + (1-a) * color0
m.visual.vertex_colors = c
picks = np.genfromtxt("verts_trace.txt").astype(int)
# lines = trace_pts(v_prd_mean, idxs=picks)#[2545, 6000, 4165+1]
pts = v_prd_mean[::subsamp, picks, :].reshape([-1, 3])
pts = trimesh.PointCloud(pts, colors=np.array([1., 0., 0.]))
# img_all, _, _, _ = render.render_trimesh(meshes[::subsamp] + [mesh0, mesh1, lines, pts], eye, center, up, res=(2048*1.5, 2048), light_intensity=2, point_size=3)
img_all, _, _, _ = render.render_trimesh(meshes[::subsamp] + [mesh0, mesh1, pts], eye, center, up, res=(2048*1.5, 2048), light_intensity=2, point_size=3)
name = "linear"
os.makedirs(f'paper_figures/{subdir}/{name}', exist_ok=True)
for i, m in enumerate(meshes):
_ = m.export(os.path.join(f'paper_figures/{subdir}/{name}', f'mesh_{i}.ply'))
# save volume curve
print(f"Volume change through deformation; Case: {name}")
plt.figure(figsize=(24, 12))
plt.imshow(img_all)
plt.axis('off')
# plt.show()
plt.savefig(f'paper_figures/{subdir}/{name}.png')
plt.show()
# save volume curve
print(f"Volume change through deformation; Case: {name}")
alpha = np.linspace(0, 1, len(meshes))
rel_vol = np.array([m.volume/meshes[0].volume for m in meshes])
np.savez(f'paper_figures/{subdir}/{name}.npz', alpha=alpha, rel_vol=rel_vol)
plt.plot(alpha, rel_vol)
plt.ylim(.7, 1.1)
plt.show()
```
# Plot combined volume change figure (AFTER running all cases, as well as the linear case)
```
# plot combined volume curve
# Overlay the volume-ratio curves saved (as .npz) by each case run above.
names = ["linear", "shapeflow_edge0", "shapeflow_divfree_edge0", "shapeflow_divfree_edge2"]
labels = ["Linear Interpolation", "Shapeflow", "Shapeflow+Divfree", "Shapeflow+Divfree+Edge"]
plt.figure(figsize=(2.7, 3.5))
for name, label in zip(names, labels):
d = np.load(os.path.join(f"paper_figures/{subdir}/{name}.npz"))
plt.plot(d['alpha'], d['rel_vol'], label=label, linewidth=3)
# plt.legend(loc='lower center')
plt.xlim(0.0, 1.0)
plt.ylim(.65, 1.05)
plt.xticks([0, 0.5, 1])
plt.yticks(np.linspace(0.7, 1, 4))
plt.xlabel("Interpolation Coefficient")
plt.ylabel("Mesh Volume Ratio")
plt.tight_layout()
# plt.show()
plt.savefig(f"paper_figures/{subdir}/volume_change.pdf")
```
| github_jupyter |
# Guided Project: Visualizing Earnings Based On College Majors
We'll be working with a dataset on the job outcomes of students who graduated from college between 2010 and 2012. The original data on job outcomes was released by [American Community Survey](https://www.census.gov/programs-surveys/acs/), which conducts surveys and aggregates the data. FiveThirtyEight cleaned the dataset and released it on their [Github repo](https://github.com/fivethirtyeight/data/tree/master/college-majors).
Each row in the dataset represents a different major in college and contains information on gender diversity, employment rates, median salaries, and more. Here are some of the columns in the dataset:
- `Major_code` - Major code.
- `Major`- Major description.
- `Major_category` - Category of major.
- `Total` - Total number of people with major.
- `Sample_size` - Sample size (unweighted) of full-time.
- `Men` - Male graduates.
- `Women` - Female graduates.
- `ShareWomen` - Women as share of total.
- `Employed` - Number employed.
- `Median` - Median salary of full-time, year-round workers.
- `Low_wage_jobs` - Number in low-wage service jobs.
- `Full_time` - Number employed 35 hours or more.
- `Part_time` - Number employed less than 35 hours.
```
import pandas as pd
%matplotlib inline
# Load the FiveThirtyEight recent-grads dataset.
recent_grads = pd.read_csv('recent-grads.csv')
recent_grads.iloc[0]
recent_grads.head()
recent_grads.tail()
raw_data_count = recent_grads.shape[0]
recent_grads.dropna(inplace = True) # dropping rows containing missing values
cleaned_data_count = recent_grads.shape[0]
# Comparing the two counts shows how many rows were dropped.
print(raw_data_count, cleaned_data_count)
```
As one can see, one row with missing values has been dropped from our dataset.
### Pandas, Scatter Plots
We will draw scatter plots among various columns to understand the correlation between them.
#### Scatter plot : `Sample_size` vs `Median`
```
# Scatter: Sample_size (x) vs Median salary (y).
ax1 = recent_grads.plot(x = 'Sample_size', y= 'Median',
title = 'Fig 1, Scatter plot : Sample_size vs Median',
kind = 'scatter' )
```
One can observe that there is no correlation whatsoever between `Sample_size` and `Median`. As the former increases, the latter is unchanged.
#### Scatter plot : `Sample_size` vs `Unemployment_rate`
```
# Scatter: Sample_size (x) vs Unemployment_rate (y).
ax2 = recent_grads.plot(x = 'Sample_size', y= 'Unemployment_rate',
title = 'Fig 2, Scatter plot : Sample_size vs Unemployment_rate',
kind = 'scatter' )
```
#### Scatter plot : `Full_time` vs `Median`
```
# Scatter: Full_time (x) vs Median salary (y).
ax3 = recent_grads.plot(x = 'Full_time', y= 'Median',
title = 'Fig 3, Scatter plot : Full_time vs Median',
kind = 'scatter' )
```
#### Scatter plot : `ShareWomen` vs `Unemployment_rate`
```
# Scatter: ShareWomen (x) vs Unemployment_rate (y).
ax4 = recent_grads.plot(x = 'ShareWomen', y= 'Unemployment_rate',
title = 'Fig 4, Scatter plot : ShareWomen vs Unemployment_rate',
kind = 'scatter' )
```
#### Scatter plot : `Men` vs `Median`
```
# Scatter: Men (x) vs Median salary (y).
ax5 = recent_grads.plot(x = 'Men', y= 'Median',
title = 'Fig 5, Scatter plot : Men vs Median',
kind = 'scatter' )
```
#### Scatter plot : `Men` vs `Median`
```
# Scatter: Women (x) vs Median salary (y).
ax6 = recent_grads.plot(x = 'Women', y= 'Median',
title = 'Fig 6, Scatter plot : Women vs Median',
kind = 'scatter' )
```
#### Scatter plot : `ShareWomen` vs `Median`
```
# Scatter: ShareWomen (x) vs Median salary (y).
ax7 = recent_grads.plot(x = 'ShareWomen', y= 'Median',
title = 'Fig 7, Scatter plot : ShareWomen vs Median',
kind = 'scatter' )
```
#### Some questions based on the above scatter plots
1. **Do students in more popular majors make more money?**
No. Fig 5 and Fig 6 imply that the median salary is not higher for majors attended by large numbers of men or women.
2. **Do students that majored in subjects that were majority female make more money?**
No. Fig 7 shows a negative correlation: as the `ShareWomen` percentage increases, the median income drops.
3. **Is there any link between the number of full-time employees and median salary?**
Even though the median income is very high for a small number of full-time employees, as the number increases the scatter plot's trend runs parallel to the `x-axis`, suggesting no correlation beyond that point.
### Histograms
```
import matplotlib.pyplot as plt

# Draw a 4x2 grid of histograms, one per column of interest.
fig = plt.figure(figsize = (8,10))
fig.subplots_adjust(hspace = 0.5)  # extra vertical gap so titles don't overlap
int_cols = ['Sample_size', 'Median', 'Employed', 'Full_time', 'ShareWomen',
            'Unemployment_rate', 'Men', 'Women']
for idx, col in enumerate(int_cols):
    title_str = 'Histogram of '+ str(col)
    ax = fig.add_subplot(4, 2, idx + 1)
    # Plot explicitly onto this subplot instead of relying on the implicit
    # "current axes" state (the original re-bound `ax` to whatever axes
    # pandas happened to draw on).
    recent_grads[col].hist(bins = 25, ax = ax)
    ax.set_title(title_str)
    ax.ticklabel_format(axis = 'x', style = 'sci', scilimits = (-1,1))
plt.show()
```
Using the above plots, we will try to explore the following questions:
**What percent of majors are predominantly male? Predominantly female?**
Refer the histogram of `ShareWomen`. If it is less than 0.5, it means the course is predominantly male. From the graph it can be observed that more than 60% of the courses are predominantly female
**What's the most common median salary range?**
35k - 40k. Refer the histogram of `Median`
```
from pandas.plotting import scatter_matrix
# Pairwise scatter plots (diagonal shows histograms) to eyeball correlations.
scatter_matrix(recent_grads[['Sample_size', 'Median']], figsize = (10, 10))
scatter_matrix(recent_grads[['Sample_size', 'Median', 'Unemployment_rate']], figsize = (15, 15))
```
One thing is certain: there is no correlation between `Sample_size`, `Median` and `Unemployment_rate`. As one variable increases, the others don't change.
#### Bar plot to compare the percentages of women (`ShareWomen`) from the first ten rows and last ten rows of the `recent_grads` dataframe
```
# Bar plots of ShareWomen for the 10 highest- and 10 lowest-ranked majors.
# NOTE(review): both calls draw onto the current axes, so the second bar plot
# may overlay the first within one cell — confirm the intended output.
recent_grads['ShareWomen'].head(10).plot.bar()
recent_grads['ShareWomen'].tail(10).plot.bar()
```
| github_jupyter |
```
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex client library: Local image classification model for online prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_local_image_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_local_image_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex client library for Python to deploy a locally trained custom image classification model for online prediction.
### Dataset
The dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck.
### Objective
In this notebook, you create a custom model locally in the notebook, then learn to deploy the locally trained model to Vertex, and then do a prediction on the deployed model. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.
The steps performed include:
- Create a model locally.
- Train the model locally.
- View the model evaluation.
- Upload the model as a Vertex `Model` resource.
- Deploy the `Model` resource to a serving `Endpoint` resource.
- Make a prediction.
- Undeploy the `Model` resource.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex client library.
```
import os
import sys
# Google Cloud Notebook
# Google Cloud Notebooks require --user installs; Colab/local do not.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
# The Cloud Storage client library is used later to stage model artifacts.
! pip3 install -U google-cloud-storage $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
```
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
# do_shutdown(True) restarts the kernel so freshly installed packages import.
app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# Fall back to the gcloud-configured project when the placeholder is unchanged.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)
```
# Region for all Vertex resources; choose the one closest to you.
REGION = "us-central1"  # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
```
from datetime import datetime

# Session-unique suffix (second resolution) appended to resource names to
# avoid collisions between users sharing a test project.
TIMESTAMP = "{:%Y%m%d%H%M%S}".format(datetime.now())
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
# Colab: interactive OAuth flow.
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a custom training job using the Vertex client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an `Endpoint` resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}

# Auto-generate a per-session bucket name when the user left the placeholder
# (or cleared it).
if BUCKET_NAME in (None, "", "gs://[your-bucket-name]"):
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
# Create the staging bucket in the chosen region (errors if it already exists).
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
# Sanity-check: list the bucket's contents to confirm access.
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
#### Import Vertex client library
Import the Vertex client library into our Python environment.
```
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
```
#### Vertex constants
Setup up the following constants for Vertex:
- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.
- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
```
# Regional API service endpoint shared by every Vertex service client below.
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"

# Vertex location root path for dataset, model and endpoint resources.
PARENT = f"projects/{PROJECT_ID}/locations/{REGION}"
```
#### Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for prediction.
Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
For GPU, available accelerators include:
- aip.AcceleratorType.NVIDIA_TESLA_K80
- aip.AcceleratorType.NVIDIA_TESLA_P100
- aip.AcceleratorType.NVIDIA_TESLA_P4
- aip.AcceleratorType.NVIDIA_TESLA_T4
- aip.AcceleratorType.NVIDIA_TESLA_V100
Otherwise specify `(None, None)` to use a container image to run on a CPU.
```
# NOTE(review): the env var name "IS_TESTING_DEPOLY_GPU" has a transposed
# "DEPOLY"; it is a runtime lookup key, so it is presumably matched by the
# test harness — confirm before renaming.
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
# No accelerator: serve on CPU-only machines.
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Container (Docker) image
Next, we will set the Docker container images for prediction
- Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. The following list shows some of the pre-built images available:
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest`
- Scikit-learn
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
```
# TensorFlow version for the serving container (overridable for CI testing).
TF = os.getenv("IS_TESTING_TF") or "2-1"

# Compose the pre-built prediction image name: TF1 vs TF2, GPU vs CPU.
_family = "tf2" if TF[0] == "2" else "tf"
_device = "gpu" if DEPLOY_GPU else "cpu"
DEPLOY_VERSION = "{}-{}.{}".format(_family, _device, TF)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
```
#### Machine Type
Next, set the machine type to use for prediction.
- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*
```
# Serving machine shape: n1-standard with 4 vCPUs unless CI overrides it.
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") or "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = "-".join([MACHINE_TYPE, VCPU])
print("Deploy machine type", DEPLOY_COMPUTE)
```
# Tutorial
Now you are ready to start locally training a custom model CIFAR10, and then deploy the model to the cloud.
## Set up clients
The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
- Model Service for `Model` resources.
- Endpoint Service for deployment.
- Prediction Service for serving.
```
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
# Client for Model resources (upload, list, delete).
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
# Client for Endpoint resources (create, deploy, undeploy).
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
# Client for serving online predictions against a deployed endpoint.
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
# Iterating .items() yields (name, client) tuples for a quick sanity print.
for client in clients.items():
print(client)
```
## Train a model locally
In this tutorial, you train a CIFAR10 model locally.
### Set location to store trained model
You set the variable `MODEL_DIR` for where in your Cloud Storage bucket to save the model in TensorFlow SavedModel format.
Also, you create a local folder for the training script.
```
# Cloud Storage location where the trained SavedModel will be written.
MODEL_DIR = BUCKET_NAME + "/cifar10"
model_path_to_deploy = MODEL_DIR
# Start from a clean local package directory for the training script.
! rm -rf custom
! mkdir custom
! mkdir custom/trainer
```
#### Task.py contents
In the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads CIFAR10 dataset from TF Datasets (tfds).
- Builds a model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
# CLI flags; AIP_MODEL_DIR is injected by Vertex when run as a custom job.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Pick a distribution strategy according to --distribute.
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
# Save in SavedModel format to the (possibly gs://) model directory.
model.save(args.model_dir)
```
### Train the model
```
! python custom/trainer/task.py --epochs=10 --model-dir=$MODEL_DIR
```
## Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
```
import tensorflow as tf
# Reload the SavedModel written by the training job; tf.keras can read the
# gs:// path in MODEL_DIR directly from Cloud Storage.
model = tf.keras.models.load_model(MODEL_DIR)
```
## Evaluate the model
Now find out how good the model is.
### Load evaluation data
You will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.
You don't need the training data, which is why it is loaded as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescaling) the pixel data by dividing each pixel by 255. This will replace each single byte integer pixel with a 32-bit floating point number between 0 and 1.
y_test:<br/>
2. The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. So we don't need to do anything more.
```
import numpy as np
from tensorflow.keras.datasets import cifar10

# The training split is discarded -- only the holdout set is needed here.
(_, _), (x_test, y_test) = cifar10.load_data()
# Rescale uint8 pixels [0, 255] to float32 [0, 1], matching the `scale()`
# preprocessing used during training in trainer/task.py.
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
```
### Perform the model evaluation
Now evaluate how well the model in the custom job did.
```
model.evaluate(x_test, y_test)
```
## Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
### How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.
The serving function consists of two parts:
- `preprocessing function`:
- Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
- Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
- `post-processing function`:
- Converts the model output to the format expected by the receiving application -- e.g., compresses the output.
- Packages the output for the receiving application -- e.g., add headings, make JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.
### Serving function for image data
To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.
To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).
When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:
- `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB).
- `image.convert_image_dtype` - Changes integer pixel values to float 32.
- `image.resize` - Resizes the image to match the input shape for the model.
- `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1.
At this point, the data can be passed to the model (`m_call`).
```
CONCRETE_INPUT = "numpy_inputs"


def _preprocess(bytes_input):
    """Decode one JPEG byte string into a float32 (32, 32, 3) image in [0, 1].

    BUG FIX: `tf.image.convert_image_dtype` already rescales integer pixel
    values to [0, 1] when converting to a float dtype, so the original extra
    division by 255 squashed inputs into [0, 1/255] -- a mismatch with the
    [0, 1] data the model was trained on.
    """
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # uint8 [0, 255] -> float32 [0, 1] (scaling is done by convert_image_dtype)
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    """Map `_preprocess` over a batch of JPEG byte strings.

    Returns a dict keyed by CONCRETE_INPUT so the output feeds directly into
    the model's concrete-function input of the same name.
    """
    # `fn_output_signature` replaces the deprecated `dtype`/`back_prop`
    # arguments of tf.map_fn; gradients are irrelevant in a serving graph.
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, fn_output_signature=tf.float32
    )
    return {
        CONCRETE_INPUT: decoded_images
    }  # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
    """Serving entry point: raw JPEG byte strings in, class probabilities out."""
    model_inputs = preprocess_fn(bytes_inputs)
    return m_call(**model_inputs)
# Bind the model's call() to a concrete static graph whose single input is
# named CONCRETE_INPUT, so the serving function can feed it by keyword.
m_call = tf.function(model.call).get_concrete_function(
    [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
# Re-export the model with serving_fn attached as the default serving signature.
tf.saved_model.save(
    model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
```
## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
```
loaded = tf.saved_model.load(model_path_to_deploy)
# The serving signature has a single input; its key is the layer name to use
# when building prediction instances later on.
signature_inputs = loaded.signatures["serving_default"].structured_input_signature[1]
serving_input = next(iter(signature_inputs))
print("Serving function input:", serving_input)
```
### Upload the model
Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.
The helper function takes the following parameters:
- `display_name`: A human readable name for the `Endpoint` service.
- `image_uri`: The container image for the model deployment.
- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.
The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:
- `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources.
- `model`: The specification for the Vertex `Model` resource instance.
Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:
- `display_name`: A human readable name for the `Model` resource.
- `metadata_schema_uri`: Since your model was built without a Vertex `Dataset` resource, you will leave this blank (`''`).
- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.
The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id.
```
IMAGE_URI = DEPLOY_IMAGE


def upload_model(display_name, image_uri, model_uri):
    """Upload a SavedModel to Vertex and return the new Model resource ID.

    Blocks (up to 180s) on the long-running upload operation.
    """
    container_spec = {
        "image_uri": image_uri,
        "command": [],
        "args": [],
        "env": [{"name": "env_name", "value": "env_value"}],
        "ports": [{"container_port": 8080}],
        "predict_route": "",
        "health_route": "",
    }
    model_spec = {
        "display_name": display_name,
        "metadata_schema_uri": "",  # blank: no managed Vertex Dataset was used
        "artifact_uri": model_uri,
        "container_spec": container_spec,
    }
    response = clients["model"].upload_model(parent=PARENT, model=model_spec)
    print("Long running operation:", response.operation.name)
    upload_model_response = response.result(timeout=180)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model


model_to_deploy_id = upload_model(
    "cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
```
### Get `Model` resource information
Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
```
def get_model(name):
    """Fetch and print the Vertex Model resource identified by `name`."""
    print(clients["model"].get_model(name=name))


get_model(model_to_deploy_id)
```
## Deploy the `Model` resource
Now deploy the trained Vertex custom `Model` resource. This requires two steps:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
### Create an `Endpoint` resource
Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`.
```
ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP


def create_endpoint(display_name):
    """Create a Vertex Endpoint and block (up to 300s) until it is provisioned."""
    response = clients["endpoint"].create_endpoint(
        parent=PARENT, endpoint={"display_name": display_name}
    )
    print("Long running operation:", response.operation.name)
    result = response.result(timeout=300)
    print("result")
    # Echo the key attributes of the freshly created Endpoint resource.
    for attr in ("name", "display_name", "description", "labels",
                 "create_time", "update_time"):
        print(" {}:".format(attr), getattr(result, attr))
    return result


result = create_endpoint(ENDPOINT_NAME)
```
Now get the unique identifier for the `Endpoint` resource you created.
```
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID is the trailing path component of the resource name.
endpoint_short_id = endpoint_id.rsplit("/", 1)[-1]
print(endpoint_id)
```
### Compute instance scaling
You have several choices on scaling the compute instances for handling your online prediction requests:
- Single Instance: The online prediction requests are processed on a single compute instance.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.
- Auto Scaling: The online prediction requests are split across a scaleable number of compute instances.
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
```
# Compute-instance scaling bounds for the deployment; min == max == 1 pins
# the endpoint to a single node (no auto-scaling).
MIN_NODES = 1
MAX_NODES = 1
```
### Deploy `Model` resource to the `Endpoint` resource
Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:
- `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.
- `deploy_model_display_name`: A human readable name for the deployed model.
- `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to.
The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:
- `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.
- `deployed_model`: The requirements specification for deploying the model.
- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.
- If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic.
- If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100.
Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:
- `model`: The Vertex fully qualified model identifier of the (upload) model to deploy.
- `display_name`: A human readable name for the deployed model.
- `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.
- `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests.
- `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
#### Traffic Split
Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.
Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy it to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but have it receive only, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision.
#### Response
The method returns a long running operation `response`. We will wait synchronously for the operation to complete by calling `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
```
DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP


def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy an uploaded Model to an Endpoint and return the deployment ID.

    Args:
        model: Fully qualified Vertex Model resource identifier.
        deployed_model_display_name: Human readable name for the deployment.
        endpoint: Fully qualified Vertex Endpoint resource identifier.
        traffic_split: Percent of endpoint traffic per deployed model id;
            defaults to {"0": 100}, i.e. all traffic to this model.

    Returns:
        The `id` of the newly deployed model on the endpoint.
    """
    # None-sentinel instead of a mutable default argument (which is shared
    # across calls); the effective default is unchanged.
    if traffic_split is None:
        traffic_split = {"0": 100}

    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }

    # Distinct name from the response's `deployed_model` below to avoid
    # reusing one variable for two different objects.
    deployed_model_spec = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        # Keep container logging enabled while debugging the deployment.
        "disable_container_logging": False,
    }

    response = clients["endpoint"].deploy_model(
        endpoint=endpoint,
        deployed_model=deployed_model_spec,
        traffic_split=traffic_split,
    )
    print("Long running operation:", response.operation.name)
    # Block until deployment completes (first deployments can take minutes).
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id


deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
```
## Make an online prediction request
Now make an online prediction request to your deployed model.
### Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
```
# Use the first holdout image/label pair as the online-prediction test item.
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
```
### Prepare the request content
You are going to send the CIFAR10 image as compressed JPG image, instead of the raw uncompressed bytes:
- `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image.
- Denormalize the image data from \[0,1) range back to [0,255).
- Convert the 32-bit floating point values to 8-bit unsigned integers.
- `tf.io.read_file`: Read the compressed JPG images back into memory as raw bytes.
- `base64.b64encode`: Encode the raw bytes into a base 64 encoded string.
```
import base64
import cv2

# x_test was normalized to [0, 1]; denormalize to uint8 before JPEG-encoding.
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
# Read the compressed JPEG back as raw bytes. Named `jpeg_bytes` rather than
# shadowing the builtin `bytes` type, as the original did.
jpeg_bytes = tf.io.read_file("tmp.jpg")
b64str = base64.b64encode(jpeg_bytes.numpy()).decode("utf-8")
```
### Send the prediction request
Ok, now you have a test image. Use this helper function `predict_image`, which takes the following parameters:
- `image`: The test image data as a numpy array.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed to.
- `parameters_dict`: Additional parameters for serving.
This function calls the prediction client service `predict` method with the following parameters:
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed to.
- `instances`: A list of instances (encoded images) to predict.
- `parameters`: Additional parameters for serving.
To pass the image data to the prediction service, in the previous step you encoded the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. You need to tell the serving binary where your model is deployed to, that the content has been base64 encoded, so it will decode it on the other end in the serving binary.
Each instance in the prediction request is a dictionary entry of the form:
{serving_input: {'b64': content}}
- `input_name`: the name of the input layer of the underlying model.
- `'b64'`: A key that indicates the content is base64 encoded.
- `content`: The compressed JPG image bytes as a base64 encoded string.
Since the `predict()` service can take multiple images (instances), you will send your single image as a list of one image. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.
The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction:
- `predictions`: Confidence level for the prediction, between 0 and 1, for each of the classes.
```
def predict_image(image, endpoint, parameters_dict):
    """Send one base64-encoded image to the endpoint and print its predictions."""
    # Each instance must conform to the deployed model's prediction input
    # schema: {serving_input: {"b64": <base64 content>}}.
    raw_instances = [{serving_input: {"b64": image}}]
    instances = [json_format.ParseDict(item, Value()) for item in raw_instances]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters_dict
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    print("predictions")
    for prediction in response.predictions:
        print(" prediction:", prediction)


predict_image(b64str, endpoint_id, None)
```
## Undeploy the `Model` resource
Now undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to.
This function calls the endpoint client service's method `undeploy_model`, with the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.
- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.
Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}.
```
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy a model from its Endpoint.

    traffic_split={} is valid here because no other models remain deployed
    on this endpoint.
    """
    print(
        clients["endpoint"].undeploy_model(
            endpoint=endpoint,
            deployed_model_id=deployed_model_id,
            traffic_split={},
        )
    )


undeploy_model(deployed_model_id, endpoint_id)
```
# Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
# Assignment #01
This assignment is composed of 4 tasks. Your team is expected to propose a solution to each of these tasks using the max-tree. The deliverable for this assignment is:
1. This jupyter-notebook completed with your solution.
- Create a GitHub repository and create a folder assignment-01. Put the jupyter-notebook with your solution in this repository and send the instructor the link. The same repository that your team created will be used for submitting the other assignments and your team final project.
You are being assessed based on:
1. Code execution - 20%
2. Clarity of the code (e.g., easy to follow, has pertinent comments, etc.) - 20%
3. Proper usage of the techniques seen in class - 40%
4. Quality of the results - 20%
More details about the assessment are available in the rubric document in the class GitHub repository.
```
! git clone https://github.com/rmsouza01/ENSF619.2.git
! git clone https://github.com/rmsouza01/siamxt.git; cd siamxt; python setup.py install
import numpy as np # numpy is the major library in which siamxt was built upon
# we like the array programming style =)
# We are using PIL to read images
from PIL import Image
# and matplotlib to display images
import matplotlib.pyplot as plt
import siamxt
```
**Part 1)** Using max-tree area signature analysis, determine the connected components (CCs) in the max-tree
that separate Mona Lisa's face from the background
```
# Read the image as 8-bit grayscale ("L"); siamxt max-trees require
# uint8 or uint16 input.
img = np.array(Image.open("/content/ENSF619.2/Assignment01/Images/monalisa.png").convert("L"))
print(img.shape)

# Show the input image before any max-tree processing.
plt.figure()
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("Original image")
plt.show()

# Include your solution here...
```
**Part 2)** Apply a series of filters to this image to segment the carotid arteries wall
```
# Read the image as 8-bit grayscale ("L"); siamxt max-trees require
# uint8 or uint16 input.
img = np.array(Image.open("/content/ENSF619.2/Assignment01/Images/carotid.png").convert("L"))
print(img.shape)

# Show the input image before any filtering.
plt.figure()
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("Original image")
plt.show()

# Include your solution here...
```
**Part 3)** Apply a series of filters to remove the white artifacts in the image
```
# Read the image as 8-bit grayscale ("L"); siamxt max-trees require
# uint8 or uint16 input.
img = np.array(Image.open("/content/ENSF619.2/Assignment01/Images/filtering-image.jpg").convert("L"))
print(img.shape)

# Show the input image before removing the white artifacts.
plt.figure()
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("Original image")
plt.show()

# Include your solution here...
```
**Part 4)** Apply a series of filters that will remove all objects in the image except for the
two pens
```
# Read the image as 8-bit grayscale ("L"); siamxt max-trees require
# uint8 or uint16 input.
img = np.array(Image.open("/content/ENSF619.2/Assignment01/Images/objects.png").convert("L"))
print(img.shape)

# Show the input image before isolating the two pens.
plt.figure()
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("Original image")
plt.show()

# Include your solution here...
```
| github_jupyter |
# Anonymize data for figure 1
The original data used for plots are organized such that each row represents an editor and a comparable author. But since the dataframe contains identifying information about editors, such as paper count, citation count, rank of first affiliation, etc., that, once combined, could identify an editor, we remove the ID of each row and shuffle the data within each group of Year0 and field-of-study. As a result, you can no longer identify individual scientists from the data we use, while the overall distribution of attributes of the population is preserved.
This notebook is only to show the steps taken to anonymize the data and **cannot be executed**.
```
import pandas as pd
import numpy as np

%%time
# Top-level field-of-study table: maps each FieldOfStudyId (renamed to
# 'Parent' to match the join key used throughout) to a Discipline name.
fields = (
    pd.read_csv('../data/supplementary/AllFields.csv', sep='\t',
                dtype={'Discipline':str, 'FieldOfStudyId':int})
    .rename(columns={'FieldOfStudyId':'Parent'})
)

# Discipline label for every author; the numeric field id is dropped once
# the human-readable Discipline has been merged in.
author_field = (
    pd.read_csv('/scratch/fl1092/capstone/conflated/AuthorEraDisp.csv',
                sep='\t', usecols=['NewAuthorId', 'Parent'], dtype={'NewAuthorId':int, 'Parent':int})
    .merge(fields, on=['Parent'])
    .drop('Parent', axis=1)
)
print(author_field.shape)

# One row per (editor, comparable author) pair; 'E'-prefixed columns
# describe the editor, 'A'-prefixed columns the matched author.
stats = pd.read_csv('/scratch/fl1092/capstone/temp/Figure1AllAuthors.csv', sep='\t',
                    usecols=['NewAuthorId','Yfp','Aylp','Parent','EditorsNewId','issn','Year0','Eylp',
                             'APriorPaperCount','EPriorPaperCount','APriorCitationCount','EPriorCitationCount',
                             'AHindex','EHindex','Arank','Erank','AColabCount','EColabCount'],
                    dtype={'NewAuthorId':int,'Yfp':int,'Aylp':int,'Parent':int,'EditorsNewId':int,'issn':str,
                           'Year0':int,'Eylp':int,'APriorPaperCount':int,'EPriorPaperCount':int,
                           'APriorCitationCount':int,'EPriorCitationCount':int, 'AHindex':int, 'EHindex':int,
                           'Arank':int,'Erank':int,'AColabCount':int,'EColabCount':int})

# whether the scientist is affiliated with top-100 institution or not
stats = stats.assign(ATop = stats.Arank <= 100)
stats = stats.assign(ETop = stats.Erank <= 100)
# only plot between 1980 and 2017
stats = stats[(stats.Year0 >= 1980) & (stats.Year0 <= 2017) ]
# plot the 15 fields
stats = stats.merge(fields, on=['Parent'])
# Academic age in Year0: years since first publication (Yfp), inclusive.
stats = stats.assign(Age=stats.Year0-stats.Yfp+1)

# Editor-side attributes only, de-duplicated to one row per editor record.
estats = stats[['Parent', 'Yfp', 'Year0', 'EditorsNewId','issn','Age','Discipline',
                'EPriorPaperCount','EPriorCitationCount','EHindex','ETop','EColabCount']].drop_duplicates()

## calculate the mean values of all sampled authors for each editor
outcomes = ['APriorPaperCount', 'APriorCitationCount', 'AHindex', 'ATop', 'AColabCount']
astats = (
    stats.groupby(['EditorsNewId','issn','Parent','Year0','Discipline'])
    .agg({x: np.mean for x in outcomes}).reset_index()
)

%%time
stats = stats.merge(author_field.rename(columns={'Discipline':'AuthorField'}), on='NewAuthorId')
print(stats.shape)

outcomes = ['PriorPaperCount', 'PriorCitationCount', 'Hindex', 'Top', 'ColabCount']

# shuffle within each group of year0 and discipline
# such that the distribution over time and discipline is preserved
# Mechanism: each skeleton frame below is sorted on its grouping columns
# only; each outcome column is then sorted on (grouping columns, outcome)
# and re-attached by row position. Values therefore stay inside their
# original (discipline, year) group but are decoupled from any
# identifying row.
shuffled_estats = (
    estats[['Discipline', 'Parent', 'Year0', 'Age']]
    .sort_values(by=['Discipline','Parent','Year0','Age'])
    .reset_index(drop=True)
)
shuffled_astats = (
    astats[['Discipline', 'Parent', 'Year0']]
    .sort_values(by=['Discipline','Parent','Year0'])
    .reset_index(drop=True)
)
shuffled_stats = (
    stats[['AuthorField','Year0']]
    .sort_values(by=['AuthorField','Year0'])
    .reset_index(drop=True)
)

for outcome in outcomes:
    # Editor outcome column, re-attached positionally within its group.
    es = (
        estats[['Discipline','Parent', 'Year0', 'E'+outcome]]
        .sort_values(by=['Discipline','Parent','Year0','E'+outcome])
        .reset_index(drop=True)
    )
    shuffled_estats['E'+outcome] = es['E'+outcome]
    # Per-editor mean of the matched authors' outcome.
    ast = (
        astats[['Discipline','Parent', 'Year0', 'A'+outcome]]
        .sort_values(by=['Discipline','Parent','Year0','A'+outcome])
        .reset_index(drop=True)
    )
    shuffled_astats['A'+outcome] = ast['A'+outcome]
    # Individual author outcome.
    st = (
        stats[['AuthorField','Year0','A'+outcome]]
        .sort_values(by=['AuthorField','Year0','A'+outcome])
        .reset_index(drop=True)
    )
    shuffled_stats['A'+outcome] = st['A'+outcome]

# Anonymized outputs consumed by the figure-1 plotting notebook.
shuffled_estats.to_csv('../data/figure_1/EditorStats.csv',sep='\t',index=False)
shuffled_astats.to_csv('../data/figure_1/AuthorStats.csv',sep='\t',index=False)
shuffled_stats.to_csv('../data/figure_1/AuthorIndividualStats.csv',sep='\t',index=False)
```
| github_jupyter |

[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_lingual/multi_class_text_classification/NLU_training_multi_lingual_multi_class_text_classifier_demo_amazon.ipynb)
# Training a Deep Learning Classifier with NLU
## ClassifierDL (Multi-class Text Classification)
## 3-class Amazon phone review classifier training
With the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#classifierdl-multi-class-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem
This notebook showcases the following features :
- How to train the deep learning classifier
- How to store a pipeline to disk
- How to load the pipeline from disk (Enables NLU offline mode)
You can achieve these results or even better on this dataset with training data :
<br>

You can achieve these results or even better on this dataset with test data :
<br>

# 1. Install Java 8 and NLU
```
!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
import nlu
```
# 2. Download Amazon Unlocked mobile phones dataset
https://www.kaggle.com/PromptCloudHQ/amazon-reviews-unlocked-mobile-phones
dataset with unlocked mobile phone reviews in 5 review classes
```
# Download the multilingual Amazon unlocked-phone review dataset.
! wget http://ckl-it.de/wp-content/uploads/2021/02/Amazon_Unlocked_Mobile_multi_lingual.csv
import pandas as pd
test_path = '/content/Amazon_Unlocked_Mobile_multi_lingual.csv'
train_df = pd.read_csv(test_path,sep=",")
# Keep only the label ('y') and feature ('text') columns NLU trains on.
cols = ["y","text"]
train_df = train_df[cols]
from sklearn.model_selection import train_test_split
# 80/20 train/test split (no random_state, so the split varies per run).
train_df, test_df = train_test_split(train_df, test_size=0.2)
train_df
```
# 3. Train Deep Learning Classifier using nlu.load('train.classifier')
Your dataset's label column should be named 'y' and the feature column with text data should be named 'text'
```
# Build a trainable pipeline: LaBSE multilingual sentence embeddings
# feeding a ClassifierDL head.
trainable_pipe = nlu.load('xx.embed_sentence.labse train.classifier')
# We need to train longer and use a smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['classifier_dl'].setMaxEpochs(60)
trainable_pipe['classifier_dl'].setLr(0.005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
# The sentence detector that is part of the pipe generates some NaNs; drop them first.
preds.dropna(inplace=True)
from sklearn.metrics import classification_report
# Training-set metrics (optimistic; see the test-set evaluation below).
print(classification_report(preds['y'], preds['trained_classifier']))
preds
```
# 3.1 evaluate on Test Data
```
# Score the held-out test split with the fitted pipeline.
preds = fitted_pipe.predict(test_df,output_level='document')
# The sentence detector that is part of the pipe generates some NaNs; drop them first.
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_classifier']))
```
#4. Test Model with 20 languages!
```
# Reload the full dataset and predict on the first 100 rows.
# NOTE(review): this selects a 'test_sentences' column rather than the
# 'text' column used for training — confirm the CSV actually provides it.
train_df = pd.read_csv("Amazon_Unlocked_Mobile_multi_lingual.csv")
preds = fitted_pipe.predict(train_df[["test_sentences","y"]].iloc[:100],output_level='document')
# The sentence detector that is part of the pipe generates some NaNs; drop them first.
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_classifier']))
preds
```
# The Model understands English

```
# English demo: a positive and then a negative review sentence.
fitted_pipe.predict("It was like brand new ")
fitted_pipe.predict("It stopped working on the first day ")
```
# The Model understands German

```
# German demo: positive then negative review sentence.
# German for: 'It worked perfectly '
fitted_pipe.predict("Es hat perfekt funktioniert")
# German for: 'It stopped working on the first day'
fitted_pipe.predict("Am ersten Tag hörte es auf zu arbeiten ")
```
# The Model understands Chinese

```
# Chinese demo: positive then negative review sentence.
# Chinese for: 'It was like brand new'
fitted_pipe.predict("就像全新 ")
# Chinese for: 'It stopped working on the first day'
fitted_pipe.predict("第一天停止工作 ")
```
# Model understands Afrikaans

```
# Afrikaans demo: positive then negative review sentence.
# Afrikaans for: 'It worked perfectly '
fitted_pipe.predict("Dit het perfek gewerk")
# Afrikaans for: 'It stopped working on the first day'
fitted_pipe.predict("Dit het op die eerste dag opgehou werk ")
```
# The model understands Zulu

```
# Zulu demo: positive then negative review sentence.
# Zulu for: 'It worked perfectly '
fitted_pipe.predict("Kusebenze ngokuphelele")
# Zulu for: 'It stopped working on the first day'
fitted_pipe.predict("Iyeke ukusebenza ngosuku lokuqala ")
```
# The Model understands Turkish

```
# Turkish demo: positive then negative review sentence.
# Turkish for: 'It worked perfectly '
fitted_pipe.predict("Mükemmel çalıştı")
# Turkish for: 'It stopped working on the first day'
fitted_pipe.predict("İlk gün çalışmayı bıraktı ")
```
# The Model understands Hebrew

```
# Hebrew demo: positive then negative review sentence.
# Hebrew for: 'It worked perfectly '
fitted_pipe.predict("זה עבד בצורה מושלמת")
# Hebrew for: 'It stopped working on the first day'
fitted_pipe.predict("זה הפסיק לעבוד ביום הראשון ")
```
# The Model understands Telugu

```
# Telugu demo: positive then negative review sentence.
# Telugu for: 'It was like brand new'
fitted_pipe.predict("ఇది సరికొత్తది ")
# Telugu for: 'It stopped working on the first day'
fitted_pipe.predict("ఇది మొదటి రోజు పనిచేయడం మానేసింది ")
```
# Model understands Russian

```
# Russian demo: negative then positive review sentence.
# Russian for: 'It stopped working on the first day'
fitted_pipe.predict("Перестал работать в первый же день ")
# Russian for: 'It worked perfectly '
fitted_pipe.predict("Это сработало отлично")
```
# The Model understands French

```
# French demo: positive then negative review sentence.
# French for: 'It was the best ever !!'
fitted_pipe.predict("C'était la meilleure chose que je n'ai jamais faite !!")
# French for: 'It stopped working on the first day'
fitted_pipe.predict("Il a cessé de fonctionner le premier jour ")
```
# The Model understands Thai

```
# Thai demo: positive then negative review sentence.
# Thai for: 'It was the best ever !!'
fitted_pipe.predict("มันดีที่สุดเท่าที่เคยมีมา !!")
# Thai for: 'It stopped working on the first day'
fitted_pipe.predict("มันหยุดทำงานในวันแรก ")
```
# The Model understands Khmer

```
# Khmer demo: positive then negative review sentence.
# Khmer for: 'It was like brand new'
fitted_pipe.predict("វាដូចជាម៉ាកថ្មី ")
# Khmer for: 'It stopped working on the first day'
fitted_pipe.predict("វាឈប់ធ្វើការនៅថ្ងៃដំបូង ")
```
# The Model understands Yiddish

```
# Yiddish demo: positive then negative review sentence.
# Yiddish for: 'It was the best ever !!'
fitted_pipe.predict("עס איז געווען דער בעסטער טאָמיד !!")
# Yiddish for: 'It stopped working on the first day'
fitted_pipe.predict("אויף דער ערשטער טאָג עס סטאַפּט ארבעטן ")
```
# The Model understands Kyrgyz

```
# Kyrgyz demo: positive then negative review sentence.
# Kyrgyz for: 'It was the best ever !!'
fitted_pipe.predict("Бул эң мыкты болду !!")
# Kyrgyz for: 'It stopped working on the first day'
fitted_pipe.predict("Биринчи күнү эле иштебей калды ")
```
# The Model understands Tamil

```
# Tamil demo: positive then negative review sentence.
# Tamil for: 'It was the best ever !!'
fitted_pipe.predict("இது எப்போதும் சிறந்தது !! ")
# Tamil for: 'It stopped working on the first day'
fitted_pipe.predict("இது முதல் நாளில் வேலை செய்வதை நிறுத்தியது ")
```
# 5. Lets save the model
```
# Persist the fitted pipeline to disk for offline reuse.
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
```
# 6. Lets load the model from HDD.
This makes offline NLU usage possible!
You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
```
# Restore the pipeline from disk and sanity-check it on one sentence.
stored_model_path = './models/classifier_dl_trained'
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It worked perfectly.')
preds
# List the components/output columns of the reloaded pipeline.
hdd_pipe.print_info()
```
| github_jupyter |
Implementing this paper: [Unsupervised Natural Language Generation with Denoising Autoencoders](https://arxiv.org/pdf/1804.07899.pdf)
Data from here: http://www.macs.hw.ac.uk/InteractionLab/E2E/#
```
from collections import Counter
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import utils
# For E2E Dataset
trainset = pd.read_csv('e2e-dataset/trainset.csv')
# Normalize punctuation in the reference texts before tokenization.
trainset = trainset.assign(clean=utils.replace_punctuation(trainset['ref']))
# Build token <-> id lookup tables from the cleaned corpus.
vocab_to_int, int_to_vocab = utils.get_tokens(trainset['clean'])
# Encode each cleaned sentence as a list of integer token ids.
as_tokens = trainset['clean'].apply(lambda x: [vocab_to_int[each] for each in x.split()])
trainset = trainset.assign(tokenized=as_tokens)
def dataloader(dataset, p_drop=0.6, max_length=50):
    """Yield (corrupted_input, target) tensor pairs for denoising training.

    Each sequence is corrupted (word drops via utils.corrupt) and
    word-shuffled (utils.shuffle); the target is the original sequence
    with the <EOS> token (id 1) appended. Tensors are shaped
    (seq_len, 1) for batch-size-1 training.

    NOTE(review): the p_drop and max_length parameters are accepted but
    never used — utils.corrupt is called with its own defaults. Confirm
    whether they should be forwarded.
    """
    # Corrupt dataset by randomly dropping words
    corrupted = utils.corrupt(dataset)
    # Shuffle words in each sequence
    shuffled = [utils.shuffle(seq, cor_seq) for seq, cor_seq in zip(dataset, corrupted)]
    for shuffled_seq, original_seq in zip(shuffled, dataset):
        # need to make sure our input_tensors have at least one element:
        # if corruption dropped everything, keep one random original word
        if len(shuffled_seq) == 0:
            shuffled_seq = [original_seq[np.random.randint(0, len(original_seq))]]
        input_tensor = torch.Tensor(shuffled_seq).view(-1, 1).type(torch.LongTensor)
        # Append <EOS> token to the end of original sequence
        target = original_seq.copy()
        target.append(1)
        target_tensor = torch.Tensor(target).view(-1, 1).type(torch.LongTensor)
        yield input_tensor, target_tensor
class Encoder(nn.Module):
    """Bidirectional LSTM encoder over embedded token sequences.

    Embeds integer token ids and runs them through a multi-layer
    bidirectional LSTM; its outputs feed the attention decoder.
    """

    def __init__(self, vocab_size, embedding_size=300, hidden_size=256, num_layers=2, drop_p=0.5):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Lookup table mapping token ids to dense vectors.
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        # Bidirectional, so consumers see 2 * hidden_size output features.
        self.lstm = nn.LSTM(embedding_size, hidden_size, num_layers=num_layers,
                            dropout=drop_p, bidirectional=True)

    def forward(self, input, hidden):
        # (seq_len, 1) ids -> (seq_len, 1, embedding_size) vectors.
        token_vectors = self.embedding(input)
        seq_output, new_hidden = self.lstm(token_vectors, hidden)
        return seq_output, new_hidden

    def init_hidden(self, device='cpu'):
        """Return zeroed (h_0, c_0) tensors, each shaped
        (num_layers * num_directions, batch=1, hidden_size).
        """
        shape = (2 * self.num_layers, 1, self.hidden_size)
        return (torch.zeros(*shape, device=device),
                torch.zeros(*shape, device=device))
class Decoder(nn.Module):
    """Attention decoder, after the PyTorch seq2seq translation tutorial
    (http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html).

    At each step it attends over the encoder outputs using the current
    input embedding and hidden state, then emits a log-probability
    distribution over the vocabulary.
    """

    def __init__(self, vocab_size, embedding_size=300, hidden_size=256,
                 num_layers=2, drop_p=0.1, max_length=50):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.max_length = max_length
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        # Scores each of the max_length encoder positions from the current
        # input embedding and the first hidden-state layer.
        self.attn = nn.Linear(self.hidden_size + embedding_size, self.max_length)
        # Mixes the input embedding with the attended encoder context.
        self.attn_combine = nn.Linear(self.hidden_size * 2 + embedding_size, self.hidden_size)
        self.dropout = nn.Dropout(drop_p)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers=num_layers,
                            dropout=drop_p, bidirectional=True)
        # Bidirectional LSTM -> 2 * hidden_size features into the projection.
        self.out = nn.Linear(2 * hidden_size, vocab_size)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden, encoder_outputs):
        token_vec = self.dropout(self.embedding(input))
        # Attention distribution over encoder positions, conditioned on the
        # current token embedding and the first layer of the hidden state.
        scores = self.attn(torch.cat((token_vec[0], hidden[0][0]), 1))
        attn_weights = F.softmax(scores, dim=1)
        # Context vector: attention-weighted sum of the encoder outputs.
        context = torch.bmm(attn_weights.unsqueeze(0),
                            encoder_outputs.unsqueeze(0))
        # Fuse token embedding + context, project back down to hidden_size,
        # and advance the LSTM one step.
        fused = torch.cat((token_vec[0], context[0]), 1)
        fused = F.relu(self.attn_combine(fused).unsqueeze(0))
        lstm_out, hidden = self.lstm(fused, hidden)
        logits = self.out(lstm_out).view(1, -1)
        return self.log_softmax(logits), hidden, attn_weights

    def init_hidden(self, device='cpu'):
        """Return zeroed (h_0, c_0) tensors, each shaped
        (num_layers * num_directions, batch=1, hidden_size).
        """
        shape = (2 * self.num_layers, 1, self.hidden_size)
        return (torch.zeros(*shape, device=device),
                torch.zeros(*shape, device=device))
def train(dataset, encoder, decoder, enc_opt, dec_opt, criterion,
          max_length=50, print_every=1000, plot_every=100,
          teacher_forcing=0.5, device=None):
    """Train the denoising autoencoder for one pass over `dataset`.

    Parameters
    ----------
    dataset : iterable of token-id sequences, fed through `dataloader`.
    encoder, decoder : the seq2seq modules defined above.
    enc_opt, dec_opt : optimizers for the encoder / decoder parameters.
    criterion : loss comparing decoder log-probs with target token ids.
    max_length : attention window size (should match the decoder's).
    print_every / plot_every : step intervals for logging / loss tracking.
    teacher_forcing : probability of feeding the ground-truth token back
        into the decoder instead of its own prediction.
    device : torch device; autodetected when None.

    Returns
    -------
    list of float : average loss per `plot_every` window.
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    steps = 0
    plot_losses = []
    # BUG FIX: these running totals must persist across steps. The old code
    # re-initialized them inside the loop, so the printed "average" was just
    # the final step's loss divided by print_every, and plot_losses was
    # never filled.
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0   # Reset every plot_every
    for input_tensor, target_tensor in dataloader(dataset):
        # NOTE(review): dataloader is called with its default max_length,
        # not this function's max_length argument — confirm intended.
        loss = 0
        steps += 1
        input_tensor = input_tensor.to(device)
        target_tensor = target_tensor.to(device)
        enc_opt.zero_grad()
        dec_opt.zero_grad()
        h, c = encoder.init_hidden(device=device)
        # Fixed-size buffer the attention mechanism indexes into.
        encoder_outputs = torch.zeros(max_length, 2*encoder.hidden_size).to(device)
        # Run input through encoder
        enc_outputs, enc_hidden = encoder.forward(input_tensor, (h, c))
        # Prepare encoder_outputs for attention (pad/truncate to max_length)
        encoder_outputs[:min(enc_outputs.shape[0], max_length)] = enc_outputs[:max_length, 0, :]
        # First decoder input is the <SOS> token
        dec_input = torch.Tensor([[0]]).type(torch.LongTensor).to(device)
        dec_hidden = enc_hidden
        dec_outputs = []
        for ii in range(target_tensor.shape[0]):
            # Pass in previous output and hidden state
            dec_out, dec_hidden, dec_attn = decoder.forward(dec_input, dec_hidden, encoder_outputs)
            _, out_token = dec_out.topk(1)
            # Curriculum learning: sometimes use the decoder output as the
            # next input, sometimes the correct token from the target sequence
            if np.random.rand() < teacher_forcing:
                dec_input = target_tensor[ii].view(*out_token.shape)
            else:
                dec_input = out_token.detach().to(device)  # detach from history as input
            dec_outputs.append(out_token)
            loss += criterion(dec_out, target_tensor[ii])
            # Stop once the <EOS> token (id 1) is produced / fed back in
            if dec_input.item() == 1:
                break
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(encoder.parameters(), 5)
        nn.utils.clip_grad_norm_(decoder.parameters(), 5)
        enc_opt.step()
        dec_opt.step()
        # BUG FIX: accumulate plain floats via .item(); adding the loss
        # tensor itself would keep every step's autograd graph alive.
        print_loss_total += loss.item()
        plot_loss_total += loss.item()
        if steps % plot_every == 0:
            plot_losses.append(plot_loss_total / plot_every)
            plot_loss_total = 0
        if steps % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print(f"Loss avg. = {print_loss_avg}")
            print([int_to_vocab[each.item()] for each in input_tensor])
            print([int_to_vocab[each.item()] for each in dec_outputs])
    return plot_losses
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# max length for attention
max_length = 50
encoder = Encoder(len(vocab_to_int), hidden_size=512, drop_p=0.1).to(device)
decoder = Decoder(len(vocab_to_int), hidden_size=512, drop_p=0.1, max_length=max_length).to(device)
enc_opt = optim.Adam(encoder.parameters(), lr=0.001, amsgrad=True)
dec_opt = optim.Adam(decoder.parameters(), lr=0.001, amsgrad=True)
criterion = nn.NLLLoss()
epochs = 10
for e in range(1, epochs+1):
    print(f"Starting epoch {e}")
    # Anneal teacher forcing (0.9 in epoch 1 down to 0.09 in epoch 10).
    train(trainset['tokenized'], encoder, decoder, enc_opt, dec_opt, criterion,
          teacher_forcing=0.9/e, device=device, print_every=4200,
          max_length=max_length)
# BUG FIX: the original torch.save call was missing its closing ')', and the
# checkpoint metadata contradicted the run above (it recorded hidden_size=256,
# num_layers=512, epochs=5). Record the values actually used so the models
# can be reconstructed for loading the state dicts.
checkpoint = {"hidden_size": 512,
              "num_layers": 2,
              "max_length": max_length,
              "encoder_sd": encoder.state_dict(),
              "decoder_sd": decoder.state_dict(),
              "epochs": epochs}
torch.save(checkpoint, "nlg_07052018.pth")
```
| github_jupyter |
# Introduction
Decision trees are a powerful and popular machine learning technique. The basic concept is very similar to trees you may have seen commonly used to aid decision-making. The decision tree algorithm is a supervised learning algorithm -- we first construct the tree with historical data, and then use it to predict an outcome. One of the major advantages of decision trees is that they can pick up nonlinear interactions between variables in the data that linear regression can't.
This example uses decision trees to look at individual income in the US. The data is from the 1994 census, and contains information on an individual's marital status, age, type of work, and more. The target column, or what we want to predict, is whether individuals make less than or equal to 50k a year, or more than 50k a year.
```
import pandas

# Load the 1994 census income data; the prediction target is 'high_income'.
# Set index_col to False to avoid pandas thinking that the first column is row indexes (it's age)
income = pandas.read_csv("C:/Users/Jennifer/Documents/Python/Data/income.csv", index_col=False)
income.head()
```
# Converting Categorical Variables
As we can see in the data, we have categorical variables such as workclass that have string values. Multiple individuals can share the same string value. The types of work include State-gov, Self-emp-not-inc, Private, and so on. Each of these strings is a label for a category. Another example of a column of categories is sex, where the options are Male and Female.
Before we get started with decision trees, we need to convert the categorical variables in our data set to numeric variables. This involves assigning a number to each category label, then converting all of the labels in a column to the corresponding numbers.
```
# Convert string category labels to integer codes so the tree can use them.
# FIX: `pandas.Categorical.from_array` was deprecated in pandas 0.19 and
# later removed; the `pandas.Categorical(...)` constructor is the supported
# equivalent and returns the same `.codes`.
col = pandas.Categorical(income["workclass"])
income["workclass"] = col.codes
print(income["workclass"].head(5))
# Encode the remaining categorical columns the same way.
for name in ["education", "marital_status", "occupation", "relationship", "race", "sex", "native_country", "high_income"]:
    col = pandas.Categorical(income[name])
    income[name] = col.codes
```
# Using Decision Trees using Scikit Learn
We can use the scikit-learn package to fit a decision tree.
We use the DecisionTreeClassifier class for classification problems, and DecisionTreeRegressor for regression problems. The sklearn.tree package includes both of these classes.
In this case, we're predicting a binary outcome, so we'll use a classifier.
The first step is to train the classifier on the data. We'll use the fit method on a classifier to do this.
```
from sklearn.tree import DecisionTreeClassifier
# A list of columns to train with
# We've already converted all columns to numeric
columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
# Instantiate the classifier
# Set random_state to 1 to make sure the results are consistent
clf = DecisionTreeClassifier(random_state=1)
# We've already loaded the variable "income," which contains all of the income data
# Fit on the full dataset here; the train/test split comes in the next section.
clf.fit(income[columns], income["high_income"])
```
# Splitting the dataset to train and test sets
Now that we've fit a model, we can make predictions. We'll want to split our data into training and testing sets first. If we don't, we'll be making predictions on the same data that we train our algorithm with. This leads to overfitting, and will make our error appear lower than it is.
```
import numpy
import math

# Fix the RNG seed so the shuffle is reproducible, permute the row order
# via reindexing, then carve off the first 80% of rows as the training
# partition and keep the remaining 20% for testing.
numpy.random.seed(1)
income = income.reindex(numpy.random.permutation(income.index))
train_max_row = math.floor(income.shape[0] * .8)
train, test = income.iloc[:train_max_row], income.iloc[train_max_row:]
```
# Evaluating Error Using AUC
While there are many methods for evaluating error with classification, we'll use AUC. AUC ranges from 0 to 1, so it's ideal for binary classification. The higher the AUC, the more accurate our predictions.
We can compute AUC with the roc_auc_score function from sklearn.metrics. This function takes in two parameters:
y_true: true labels
y_score: predicted labels
It then calculates and returns the AUC value.
```
from sklearn.metrics import roc_auc_score

# Retrain on the training split only, then score the test split with AUC.
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
# AUC of the hard 0/1 predictions against the true labels.
error = roc_auc_score(test["high_income"], predictions)
print(error)
```
# Computing error on the training set
The AUC for the predictions on the testing set is about .694. Let's compare this against the AUC for predictions on the training set to see if the model is overfitting.
It's normal for the model to predict the training set better than the testing set. After all, it has full knowledge of that data and the outcomes. However, if the AUC between training set predictions and actual values is significantly higher than the AUC between test set predictions and actual values, it's a sign that the model may be overfitting.
```
# AUC on the training data itself, to compare against the test AUC
# above and gauge overfitting.
predictions = clf.predict(train[columns])
print(roc_auc_score(train["high_income"], predictions))
```
# Decision Tree Overfitting
Our AUC on the training set was .947, and the AUC on the test set was .694. There's no hard and fast rule on when overfitting is occurring, but our model is predicting the training set much better than the test set. Splitting the data into training and testing sets doesn't prevent overfitting -- it just helps us detect and fix it.
There are three main ways to combat overfitting:
- "Prune" the tree after we build it to remove unnecessary leaves.
- Use ensembling to blend the predictions of many trees.
- Restrict the depth of the tree while we're building it.
While we'll explore all of these, we'll look at the third method first.
Limiting tree depth during the building process will result in more general rules. This prevents the tree from overfitting.
We can restrict tree depth by adding a few parameters when we initialize the DecisionTreeClassifier class:
- max_depth - Globally restricts how deep the tree can go
- min_samples_split - The minimum number of rows a node should have before it can be split; if this is set to 2, for example, - then nodes with 2 rows won't be split, and will become leaves instead
- min_samples_leaf - The minimum number of rows a leaf must have
- min_weight_fraction_leaf - The fraction of input rows a leaf must have
- max_leaf_nodes - The maximum number of total leaves; this will cap
```
# Decision trees model from the last screen, now regularized:
# min_samples_split=13 stops splitting nodes with fewer than 13 rows,
# reducing overfitting.
# FIX: the original cell first created an unregularized classifier and
# immediately overwrote it — that dead assignment is removed.
clf = DecisionTreeClassifier(min_samples_split=13, random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
```
# Tweaking parameters to adjust AUC
By setting min_samples_split to 13, we managed to boost the test AUC from .694 to .700. The training set AUC decreased from .947 to .843, showing that the model we built was less overfit to the training set than before. Let's play around with the parameters some more.
```
# Add a depth cap on top of min_samples_split to regularize further.
clf = DecisionTreeClassifier(random_state=1, min_samples_split=13, max_depth=7)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
```
We just improved the AUC again! The test set AUC increased to .744, while the training set AUC decreased to .748:
```
# Much stronger restrictions (shallow depth, large split threshold):
# per the discussion below, this underfits the data.
clf = DecisionTreeClassifier(random_state=1, min_samples_split=100, max_depth=2)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
```
Our accuracy went down on the previous cell, relative to the cell before it. This is because we're now underfitting. Underfitting is what occurs when our model is too simple to explain the relationships between the variables.
# Bias-Variance Tradeoff
By artificially restricting the depth of our tree, we prevent it from creating a model that's complex enough to correctly categorize some of the rows. If we don't perform the artificial restrictions, however, the tree becomes too complex, fits quirks in the data that only exist in the training set, and doesn't generalize to new data.
This is known as the bias-variance tradeoff. Imagine that we take a random sample of training data and create many models. If the models' predictions for the same row are far apart from each other, we have high variance. Imagine this time that we take a random sample of the training data and create many models. If the models' predictions for the same row are close together but far from the actual value, then we have high bias.
High bias can cause underfitting -- if a model is consistently failing to predict the correct value, it may be that it's too simple to model the data faithfully.
High variance can cause overfitting. If a model varies its predictions significantly based on small changes in the input data, then it's likely fitting itself to quirks in the training data, rather than making a generalizable model.
We call this the bias-variance tradeoff because decreasing one characteristic will usually increase the other.
# Conclusion - Part 1
Let's go over the main advantages and disadvantages of using decision trees. The main advantages of using decision trees is that they're:
- Easy to interpret
- Relatively fast to fit and make predictions
- Able to handle multiple types of data
- Able to pick up nonlinearities in data, and usually fairly accurate
The main disadvantage of using decision trees is their tendency to overfit.
Decision trees are a good choice for tasks where it's important to be able to interpret and convey why the algorithm is doing what it's doing.
The most powerful way to reduce decision tree overfitting is to create ensembles of trees. The random forest algorithm is a popular choice for doing this. In cases where prediction accuracy is the most important consideration, random forests usually perform better. Let's take a look at random forests.
# Introduction to Random Forests
A random forest is a kind of ensemble model. Ensembles combine the predictions of multiple models to create a more accurate final prediction. We'll make a simple ensemble to see how they work.
Let's create two decision trees with slightly different parameters:
One with min_samples_leaf set to 2
One with max_depth set to 5
Then, we'll check their accuracies separately
```
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score

columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]

# Two trees with slightly different regularization, scored separately
# before they are combined into an ensemble.
clf = DecisionTreeClassifier(random_state=1, min_samples_leaf=2)
clf.fit(train[columns], train["high_income"])
clf2 = DecisionTreeClassifier(random_state=1, max_depth=5)
clf2.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
print(roc_auc_score(test["high_income"], predictions))
predictions = clf2.predict(test[columns])
print(roc_auc_score(test["high_income"], predictions))
```
# Combining our predictors
When we have multiple classifiers making predictions, we can treat each set of predictions as a column in a matrix. Whenever we add more models to our ensemble, we just add more columns to the combined predictions. Ultimately, we don't want this matrix, though -- we want one prediction per row in the training data. There are many ways to get from the output of multiple models to a final vector of predictions. One method is majority voting, in which each classifier gets a "vote," and the most commonly voted value for each row "wins." This only works if there are more than two classifiers (and ideally an odd number, so we don't have to write a rule to break ties). Majority voting is what we applied in the example above.
We can use the RandomForestClassifier.predict_proba() method instead, which will predict a probability from 0 to 1 that a given class is the right one for a row. Because 0 and 1 are our two classes, we'll get a matrix containing the number of rows in the income dataframe, and two columns.
Each row will correspond to a prediction. The first column is the probability that the prediction is a 0, and the second column is the probability that the prediction is a 1. Each row adds up to 1.
If we just take the second column, we get the average value that the classifier would predict for that row. If there's a .9 probability that the correct classification is 1, we can use the .9 as the value the classifier is predicting. This will give us a continuous output in a single vector, instead of just 0 or 1.
Then we can add together all of the vectors we get through this method, and divide the sum by the total number of vectors to get the mean prediction made across the entire ensemble for a particular row. Finally, we round off to get a 0 or 1 prediction for the row.
If we use the predict_proba() method on both classifiers from the last screen to generate probabilities, take the mean for each row, and then round the results, we'll get ensemble predictions.
```
# Average the two classifiers' positive-class probabilities, then round
# to a hard 0/1 ensemble prediction for each row.
predictions = clf.predict_proba(test[columns])[:,1]  # P(high_income == 1) from tree 1
predictions2 = clf2.predict_proba(test[columns])[:,1]  # P(high_income == 1) from tree 2
combined = (predictions + predictions2) / 2
rounded = numpy.round(combined)  # >= .5 becomes 1, else 0
print(roc_auc_score(test["high_income"], rounded))
```
# Why ensembling works
The models are approaching the same problem in slightly different ways, and building different trees because we used different parameters for each one. Each tree makes different predictions in different areas. Even though both trees have about the same accuracy, when we combine them, the result is stronger because it leverages the strengths of both approaches.
The more "diverse" or dissimilar the models we use to construct an ensemble are, the stronger their combined predictions will be (assuming that all of the models have about the same accuracy). Ensembling a decision tree and a logistic regression model, for example, will result in stronger predictions than ensembling two decision trees with similar parameters. That's because those two models use very different approaches to arrive at their answers.
On the other hand, if the models we ensemble are very similar in how they make predictions, ensembling will result in a negligible boost.
# Introducing Variation with Bagging
A random forest is an ensemble of decision trees. If we don't make any modifications to the trees, each tree will be exactly the same, so we'll get no boost when we ensemble them. In order to make ensembling effective, we have to introduce variation into each individual decision tree model.
If we introduce variation, each tree will be constructed slightly differently, and will therefore make different predictions. This variation is what puts the "random" in "random forest."
There are two main ways to introduce variation in a random forest -- bagging and random feature subsets. We'll dive into bagging first.
In a random forest, we don't train each tree on the entire data set. We train it on a random sample of the data, or a "bag," instead. We perform this sampling with replacement, which means that after we select a row from the data we're sampling, we put the row back in the data so it can be picked again. Some rows from the original data may appear in the "bag" multiple times.
Let's use bagging with the first tree we trained.
```
# Bagging by hand: train `tree_count` decision trees, each on a random
# 60% sample of the training rows drawn with replacement, then average
# their predicted probabilities into one ensemble prediction per row.
# We'll build 10 trees
tree_count = 10

# Each "bag" will have 60% of the number of original rows
bag_proportion = .6

predictions = []
for i in range(tree_count):
    # We select 60% of the rows from train, sampling with replacement
    # We set a random state to ensure we'll be able to replicate our results
    # We set it to i instead of a fixed value so we don't get the same sample in every loop
    # That would make all of our trees the same
    bag = train.sample(frac=bag_proportion, replace=True, random_state=i)

    # Fit a decision tree model to the "bag"
    clf = DecisionTreeClassifier(random_state=1, min_samples_leaf=2)
    clf.fit(bag[columns], bag["high_income"])

    # Using the model, make predictions on the test data
    predictions.append(clf.predict_proba(test[columns])[:,1])

# Average the per-tree probabilities. Divide by tree_count rather than a
# hard-coded 10 so the code stays correct if tree_count changes.
combined = numpy.sum(predictions, axis=0) / tree_count
rounded = numpy.round(combined)

print(roc_auc_score(test["high_income"], rounded))
```
Using the bagging example from the previous screen, we gained some accuracy over a single decision tree. To be exact, we achieved an AUC score of around .733 with bagging, which is an improvement over the AUC score of .688 we got without bagging:
Let's go back to the decision tree algorithm we explored two missions ago to explain random feature subsets:
First we pick the maximum number of features we want to evaluate each time we split the tree.
This has to be less than the total number of columns in the data.
Every time we split, we pick a random sample of features from the data.
Then we compute the information gain for each feature in our random sample, and pick the one with the highest information gain to split on.
We're repeating the same process to select the optimal split for a node, but we'll only evaluate a constrained set of features that we select randomly. This introduces variation into the trees, and makes for more powerful ensembles.
We can also repeat the random subset selection process in scikit-learn. We just set the splitter parameter on DecisionTreeClassifier to "random", and the max_features parameter to "auto". If we have N columns, this will pick a subset of features of size √N, compute the Gini coefficient for each (this is similar to information gain), and split the node on the best column in the subset.
This is essentially the same thing we did on the previous screen, but with far less typing.
```
# Two ensembles for comparison: the first loop is the plain bagging
# baseline; the second also evaluates a random subset of features at
# every split (splitter="random", max_features="auto").
# We'll build 10 trees
tree_count = 10

# Each "bag" will have 60% of the number of original rows
bag_proportion = .6

predictions = []
for i in range(tree_count):
    # We select 60% of the rows from train, sampling with replacement
    # We set a random state to ensure we'll be able to replicate our results
    # We set it to i instead of a fixed value so we don't get the same sample every time
    bag = train.sample(frac=bag_proportion, replace=True, random_state=i)

    # Fit a decision tree model to the "bag"
    clf = DecisionTreeClassifier(random_state=1, min_samples_leaf=2)
    clf.fit(bag[columns], bag["high_income"])

    # Using the model, make predictions on the test data
    predictions.append(clf.predict_proba(test[columns])[:,1])

# Divide by tree_count rather than a hard-coded 10 so the divisor tracks
# the actual number of trees.
combined = numpy.sum(predictions, axis=0) / tree_count
rounded = numpy.round(combined)

print(roc_auc_score(test["high_income"], rounded))

predictions = []
for i in range(tree_count):
    # We select 60% of the rows from train, sampling with replacement
    # We set a random state to ensure we'll be able to replicate our results
    # We set it to i instead of a fixed value so we don't get the same sample every time
    bag = train.sample(frac=bag_proportion, replace=True, random_state=i)

    # Fit a decision tree model to the "bag", this time choosing splits
    # from a random subset of the features
    clf = DecisionTreeClassifier(random_state=1, min_samples_leaf=2, splitter="random", max_features="auto")
    clf.fit(bag[columns], bag["high_income"])

    # Using the model, make predictions on the test data
    predictions.append(clf.predict_proba(test[columns])[:,1])

combined = numpy.sum(predictions, axis=0) / tree_count
rounded = numpy.round(combined)

print(roc_auc_score(test["high_income"], rounded))
```
# Putting it all together
Scikit-learn has a RandomForestClassifier class and a RandomForestRegressor class that enable us to train and test random forest models quickly.
When we instantiate a RandomForestClassifier, we pass in an n_estimators parameter that indicates how many trees to build. While adding more trees usually improves accuracy, it also increases the overall time the model takes to train.
```
# Scikit-learn's built-in random forest: n_estimators controls how many
# trees are grown and ensembled internally.
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=5, random_state=1, min_samples_leaf=2)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
print(roc_auc_score(test["high_income"], predictions))
```
# Tweaking parameters to increase accuracy
Similar to decision trees, we can tweak some of the parameters for random forests, including:
- min_samples_leaf
- min_samples_split
- max_depth
- max_leaf_nodes
These parameters apply to the individual trees in the model, and change how they are constructed. There are also parameters specific to the random forest that alter its overall construction:
- n_estimators
- bootstrap - "Bootstrap aggregation" is another name for bagging; this parameter indicates whether to turn it on (Defaults to True)
# Reducing Overfitting
One of the major advantages of random forests over single decision trees is that they tend to overfit less. Although each individual decision tree in a random forest varies widely, the average of their predictions is less sensitive to the input data than a single tree is. This is because while one tree can construct an incorrect and overfit model, the average of 100 or more trees will be more likely to hone in on the signal and ignore the noise. The signal will be the same across all of the trees, whereas each tree will hone in on the noise differently. This means that the average will discard the noise and keep the signal.
In the following code cell, you'll see that we've fit a single decision tree to the training set, and made predictions for both the training and testing sets. The AUC for the training set predictions is .819, while the AUC for the testing set is .714. The fact that the test AUC is much lower than the train AUC indicates that the model is overfitting.
```
# Compare overfitting: a single decision tree vs. a 150-tree random
# forest, each scored on both the training set and the testing set.
clf = DecisionTreeClassifier(random_state=1, min_samples_leaf=5)
clf.fit(train[columns], train["high_income"])
# Train AUC (optimistically high if the tree overfits).
predictions = clf.predict(train[columns])
print(roc_auc_score(train["high_income"], predictions))
# Test AUC — the gap from the train AUC indicates overfitting.
predictions = clf.predict(test[columns])
print(roc_auc_score(test["high_income"], predictions))
clf = RandomForestClassifier(n_estimators=150, random_state=1, min_samples_leaf=5)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(train[columns])
print(roc_auc_score(train["high_income"], predictions))
predictions = clf.predict(test[columns])
print(roc_auc_score(test["high_income"], predictions))
```
# Conclusion
As we can see in the code cell from the previous screen, overfitting decreased with a random forest, and accuracy went up overall.
While the random forest algorithm is incredibly powerful, it isn't applicable to all tasks. The main strengths of a random forest are:
- Very accurate predictions - Random forests achieve near state-of-the-art performance on many machine learning tasks. Along with neural networks and gradient-boosted trees, they're typically one of the top-performing algorithms.
- Resistance to overfitting - Due to their construction, random forests are fairly resistant to overfitting. We still need to set and tweak parameters like max_depth though.
The main weaknesses of using a random forest are:
- They're difficult to interpret - Because we're averaging the results of many trees, it can be hard to figure out why a random forest is making predictions the way it is.
- They take longer to create - Making two trees takes twice as long as making one, making three takes three times as long, and so on
| github_jupyter |
# **点云处理:实现PointNet点云分类**
**作者**:[Zhihao Cao](https://github.com/WhiteFireFox)<br>
**日期**:2021.12<br>
**摘要**:本示例在于演示如何基于 Paddle2.2 实现PointNet在ShapeNet数据集上进行点云分类处理。
## 一、环境设置
本教程基于Paddle 2.2 编写,如果你的环境不是本版本,请先参考官网[安装](https://www.paddlepaddle.org.cn/install/quick)。
```
import os
import numpy as np
import random
import h5py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
print(paddle.__version__)
```
## 二、数据集
### 2.1 数据介绍
ShapeNet数据集是一个注释丰富且规模较大的 3D 形状数据集,由斯坦福大学、普林斯顿大学和芝加哥丰田技术学院于 2015 年联合发布。<br>
ShapeNet数据集官方链接:[https://vision.princeton.edu/projects/2014/3DShapeNets/](https://vision.princeton.edu/projects/2014/3DShapeNets/)<br>
AIStudio链接:[sharpnet数据集(经过整理)](https://aistudio.baidu.com/aistudio/datasetdetail/70460)<br>
ShapeNet数据集的储存格式是h5文件,该文件中key值分别为:
- 1、data:这一份数据中所有点的xyz坐标,
- 2、label:这一份数据所属类别,如airplane等,
- 3、pid:这一份数据中所有点所属的类型,如这一份数据属airplane类,则它包含的所有点的类型有机翼、机身等类型。
### 2.2 解压数据集
```
!unzip data/data70460/shapenet_part_seg_hdf5_data.zip
!mv hdf5_data dataset
```
### 2.3 数据列表
ShapeNet数据集所有的数据文件。
```
# HDF5 file names for each ShapeNet split (train / test / validation).
train_list = ['ply_data_train0.h5', 'ply_data_train1.h5', 'ply_data_train2.h5', 'ply_data_train3.h5', 'ply_data_train4.h5', 'ply_data_train5.h5']
test_list = ['ply_data_test0.h5', 'ply_data_test1.h5']
val_list = ['ply_data_val0.h5']
```
### 2.4 搭建数据生成器
说明:将ShapeNet数据集全部读入。
```
def make_data(mode='train', path='./dataset/', num_point=2048):
    """Load point clouds and class labels for one split of ShapeNet.

    Args:
        mode: Split to read: 'train', 'test', or anything else for the
            validation split (matches the original fallthrough behavior).
        path: Directory containing the HDF5 files.
        num_point: Points kept per sample (the first num_point points).

    Returns:
        (datas, labels): lists of (num_point, 3) coordinate arrays and
        their per-sample class labels.
    """
    # Pick the file list for the requested split; the three branches of
    # the original shared an identical loop body, folded into one here.
    if mode == 'train':
        file_names = train_list
    elif mode == 'test':
        file_names = test_list
    else:
        file_names = val_list
    datas = []
    labels = []
    for file_name in file_names:
        # `with` guarantees the file is closed even if a read fails.
        with h5py.File(os.path.join(path, file_name), 'r') as f:
            datas.extend(f['data'][:, :num_point, :])
            labels.extend(f['label'])
    return datas, labels
```
说明:通过继承`paddle.io.Dataset`来完成数据集的构造。
```
class PointDataset(paddle.io.Dataset):
    """paddle.io.Dataset over in-memory point clouds and class labels."""

    def __init__(self, datas, labels):
        super(PointDataset, self).__init__()
        self.datas = datas    # list of (num_point, 3) float arrays
        self.labels = labels  # list of integer class labels

    def __getitem__(self, index):
        # Transpose to (3, num_point) so Conv1D sees channels first.
        data = paddle.to_tensor(self.datas[index].T.astype('float32'))
        label = paddle.to_tensor(self.labels[index].astype('int64'))
        return data, label

    def __len__(self):
        return len(self.datas)
```
说明:使用飞桨框架提供的API:`paddle.io.DataLoader`完成数据的加载,使得按照Batchsize生成Mini-batch的数据。
```
# Load each split and wrap it in a PointDataset.
datas, labels = make_data(mode='train', num_point=2048)
train_dataset = PointDataset(datas, labels)
datas, labels = make_data(mode='val', num_point=2048)
val_dataset = PointDataset(datas, labels)
datas, labels = make_data(mode='test', num_point=2048)
test_dataset = PointDataset(datas, labels)
# Instantiate the data loaders; only training shuffles between epochs.
train_loader = paddle.io.DataLoader(
train_dataset,
batch_size=128,
shuffle=True,
drop_last=False
)
val_loader = paddle.io.DataLoader(
val_dataset,
batch_size=32,
shuffle=False,
drop_last=False
)
test_loader = paddle.io.DataLoader(
test_dataset,
batch_size=128,
shuffle=False,
drop_last=False
)
```
## 三、定义网络
PointNet是斯坦福大学研究人员提出的一个点云处理网络,在这篇论文中,它提出了空间变换网络(T-Net)解决点云的旋转问题(注:因为考虑到某一物体的点云旋转后还是该物体,所以需要有一个网络结构去学习并解决这个旋转问题),并且提出了采取MaxPooling的方法极大程度上地提取点云全局特征。
### 3.1 定义网络结构
```
class PointNet(nn.Layer):
    """PointNet classifier with input and feature T-Net alignment blocks.

    Args:
        name_scope: Unused name prefix, kept for interface compatibility.
        num_classes: Number of output classes.
        num_point: Points per input cloud (also the max-pool window).
    """

    def __init__(self, name_scope='PointNet_', num_classes=16, num_point=2048):
        super(PointNet, self).__init__()
        # Input T-Net trunk: shared 1x1 convs lift each point to 1024-d,
        # then max-pool over all points into one global feature.
        self.input_transform_net = nn.Sequential(
            nn.Conv1D(3, 64, 1),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv1D(64, 128, 1),
            nn.BatchNorm(128),
            nn.ReLU(),
            nn.Conv1D(128, 1024, 1),
            nn.BatchNorm(1024),
            nn.ReLU(),
            nn.MaxPool1D(num_point)
        )
        # Regresses the 3x3 input alignment matrix. The last layer starts
        # with zero weights and an identity bias (paddle.eye(3) flattened)
        # so the initial transform is the identity.
        self.input_fc = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 9,
                weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(paddle.zeros((256, 9)))),
                bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(paddle.reshape(paddle.eye(3), [-1])))
            )
        )
        # First shared MLP: per-point features 3 -> 64 -> 64.
        self.mlp_1 = nn.Sequential(
            nn.Conv1D(3, 64, 1),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv1D(64, 64, 1),
            nn.BatchNorm(64),
            nn.ReLU()
        )
        # Feature T-Net trunk, mirroring the input T-Net but in 64-d.
        self.feature_transform_net = nn.Sequential(
            nn.Conv1D(64, 64, 1),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv1D(64, 128, 1),
            nn.BatchNorm(128),
            nn.ReLU(),
            nn.Conv1D(128, 1024, 1),
            nn.BatchNorm(1024),
            nn.ReLU(),
            nn.MaxPool1D(num_point)
        )
        # Regresses the 64x64 feature alignment matrix.
        self.feature_fc = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 64*64)
        )
        # Second shared MLP: per-point features 64 -> 128 -> 1024.
        self.mlp_2 = nn.Sequential(
            nn.Conv1D(64, 64, 1),
            nn.BatchNorm(64),
            nn.ReLU(),
            nn.Conv1D(64, 128, 1),
            nn.BatchNorm(128),
            nn.ReLU(),
            nn.Conv1D(128, 1024, 1),
            nn.BatchNorm(1024),
            nn.ReLU()
        )
        # Classification head; LogSoftmax pairs with F.nll_loss in training.
        self.fc = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Dropout(p=0.7),
            nn.Linear(256, num_classes),
            nn.LogSoftmax(axis=-1)
        )

    def forward(self, inputs):
        """Classify point clouds.

        Args:
            inputs: Tensor of shape (batch, 3, num_point) of xyz coords.

        Returns:
            (batch, num_classes) log-probabilities.
        """
        batchsize = inputs.shape[0]
        # Predict the 3x3 alignment and apply it in (batch, N, 3) layout.
        t_net = self.input_transform_net(inputs)
        t_net = paddle.squeeze(t_net, axis=-1)
        t_net = self.input_fc(t_net)
        t_net = paddle.reshape(t_net, [batchsize, 3, 3])
        x = paddle.transpose(inputs, (0, 2, 1))
        x = paddle.matmul(x, t_net)
        x = paddle.transpose(x, (0, 2, 1))
        x = self.mlp_1(x)
        # Predict the 64x64 feature alignment and apply it the same way.
        t_net = self.feature_transform_net(x)
        t_net = paddle.squeeze(t_net, axis=-1)
        t_net = self.feature_fc(t_net)
        t_net = paddle.reshape(t_net, [batchsize, 64, 64])
        x = paddle.squeeze(x, axis=-1)  # no-op here; kept from original
        x = paddle.transpose(x, (0, 2, 1))
        x = paddle.matmul(x, t_net)
        x = paddle.transpose(x, (0, 2, 1))
        x = self.mlp_2(x)
        # Global feature: max over the point dimension, then classify.
        x = paddle.max(x, axis=-1)
        x = paddle.squeeze(x, axis=-1)
        x = self.fc(x)
        return x
```
### 3.2 网络结构可视化
说明:使用飞桨API:`paddle.summary`完成模型结构可视化
```
# Instantiate the model and print a layer summary for a batch of 64
# clouds with 3 coordinate channels and 2048 points each.
pointnet = PointNet()
paddle.summary(pointnet, (64, 3, 2048))
```
## 四、训练
说明:模型训练的时候,将会使用`paddle.optimizer.Adam`优化器来进行优化。使用`F.nll_loss`来计算损失值。
```
def train():
    """Train PointNet on ShapeNet, validating after every epoch.

    Uses Adam with weight decay; checkpoints model and optimizer state
    every other epoch. Reads the module-level train_loader/val_loader.
    """
    model = PointNet(num_classes=16, num_point=2048)
    model.train()
    optim = paddle.optimizer.Adam(parameters=model.parameters(), weight_decay=0.001)
    epoch_num = 10
    for epoch in range(epoch_num):
        # train
        print("===================================train===========================================")
        for batch_id, data in enumerate(train_loader()):
            inputs, labels = data
            predicts = model(inputs)
            # The model ends in LogSoftmax, so nll_loss is cross-entropy.
            loss = F.nll_loss(predicts, labels)
            acc = paddle.metric.accuracy(predicts, labels)
            if batch_id % 20 == 0:
                print("train: epoch: {}, batch_id: {}, loss is: {}, accuracy is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy()))
            loss.backward()
            optim.step()
            optim.clear_grad()
        # Checkpoint every other epoch (overwrites the same files).
        if epoch % 2 == 0:
            paddle.save(model.state_dict(), './model/PointNet.pdparams')
            paddle.save(optim.state_dict(), './model/PointNet.pdopt')
        # validation
        print("===================================val===========================================")
        model.eval()
        accuracies = []
        losses = []
        for batch_id, data in enumerate(val_loader()):
            inputs, labels = data
            predicts = model(inputs)
            loss = F.nll_loss(predicts, labels)
            acc = paddle.metric.accuracy(predicts, labels)
            losses.append(loss.numpy())
            accuracies.append(acc.numpy())
        avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
        print("validation: loss is: {}, accuracy is: {}".format(avg_loss, avg_acc))
        # Switch back to training mode for the next epoch.
        model.train()

if __name__ == '__main__':
    train()
```
## 五、评估与测试
说明:通过`model.load_dict`的方式加载训练好的模型对测试集上的数据进行评估与测试。
```
def evaluation():
    """Evaluate the saved PointNet checkpoint on the test set.

    Loads './model/PointNet.pdparams' and reports mean loss/accuracy
    over the module-level test_loader.
    """
    model = PointNet()
    model_state_dict = paddle.load('./model/PointNet.pdparams')
    model.load_dict(model_state_dict)
    model.eval()
    accuracies = []
    losses = []
    for batch_id, data in enumerate(test_loader()):
        inputs, labels = data
        predicts = model(inputs)
        loss = F.nll_loss(predicts, labels)
        acc = paddle.metric.accuracy(predicts, labels)
        losses.append(loss.numpy())
        accuracies.append(acc.numpy())
    avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
    # The original printed "validation:" here although this loop runs on
    # the test set; the label is corrected to avoid misleading output.
    print("test: loss is: {}, accuracy is: {}".format(avg_loss, avg_acc))

if __name__ == '__main__':
    evaluation()
```
| github_jupyter |
# DOE Analysis for Beta Version
### National Renewable Energy Laboratory
#### Rob Hammond
##### 16 July 2020
```{warning}
**THIS WILL NOT RUN BECAUSE IT RELIES ON AN OUTDATED VERSION OF THE CODE**
```
<div class="admonition warning" name="html-admonition" style="background: red; padding: 10px">
**THIS WILL NOT RUN BECAUSE IT RELIES ON AN OUTDATED VERSION OF THE CODE**
</div>
```
import os
from time import perf_counter
from pprint import pprint
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from wombat.simulation import WombatEnvironment
from wombat.windfarm import Windfarm
from wombat.windfarm.system import System
from wombat.simulation.repairs import RepairManager
from wombat.simulation.transport import Equipment
from wombat.utilities import load_yaml
pd.set_option("display.max_rows", 1000)
%matplotlib inline
# DOE scenario matrix. Each entry maps a simulation name to a
# five-element list of input files:
#   [weather CSV, windfarm layout CSV, heavy-lift equipment YAML,
#    crew transfer YAML, cable vessel YAML]
# A None entry means that piece of servicing equipment is excluded
# (the "0_month" scenarios run with no equipment at all).
scenarios = {
#### OFFSHORE ####
"offshore_base_3_month_summer":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_3_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_summer.yaml"],
"offshore_base_no_weather_3_month_summer":
["vineyard_wind_weather_1998_2017_no_wind_wave.csv", "offshore_dudgeon_array.csv", "offshore_jackup_3_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_summer.yaml"],
"offshore_double_3_month_summer":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array_double.csv", "offshore_jackup_3_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_summer.yaml"],
"offshore_half_3_month_summer":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array_half.csv", "offshore_jackup_3_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_summer.yaml"],
"offshore_12_month":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_12_month.yaml", "offshore_ctv.yaml", "cable_vessel_12_month.yaml"],
"offshore_0_month":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", None, None, None],
"offshore_base_2_month_summer":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_2_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_2_month_summer.yaml"],
"offshore_base_no_weather_2_month_summer":
["vineyard_wind_weather_1998_2017_no_wind_wave.csv", "offshore_dudgeon_array.csv", "offshore_jackup_2_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_2_month_summer.yaml"],
"offshore_base_1_month_summer":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_1_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_1_month_summer.yaml"],
"offshore_base_no_weather_1_month_summer":
["vineyard_wind_weather_1998_2017_no_wind_wave.csv", "offshore_dudgeon_array.csv", "offshore_jackup_1_month_summer.yaml", "offshore_ctv.yaml", "cable_vessel_1_month_summer.yaml"],
"offshore_base_3_month_fall":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_3_month_fall.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_fall.yaml"],
"offshore_base_3_month_winter":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_3_month_winter.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_winter.yaml"],
"offshore_base_3_month_spring":
["vineyard_wind_weather_1998_2017.csv", "offshore_dudgeon_array.csv", "offshore_jackup_3_month_spring.yaml", "offshore_ctv.yaml", "cable_vessel_3_month_spring.yaml"],
#### ONSHORE ####
"onshore_base_3_month_summer":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_3_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_summer.yaml"],
"onshore_base_no_weather_3_month_summer":
["sweetwater_weather_1998_2017_no_wind_wave.csv", "onshore_dudgeon_array.csv", "onshore_crawler_3_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_summer.yaml"],
"onshore_double_3_month_summer":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array_double.csv", "onshore_crawler_3_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_summer.yaml"],
"onshore_half_3_month_summer":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array_half.csv", "onshore_crawler_3_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_summer.yaml"],
"onshore_12_month":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_12_month.yaml", "onshore_onsite.yaml", "onshore_cable_12_month.yaml"],
"onshore_0_month":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", None, None, None],
"onshore_base_2_month_summer":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_2_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_2_month_summer.yaml"],
"onshore_base_no_weather_2_month_summer":
["sweetwater_weather_1998_2017_no_wind_wave.csv", "onshore_dudgeon_array.csv", "onshore_crawler_2_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_2_month_summer.yaml"],
"onshore_base_1_month_summer":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_1_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_1_month_summer.yaml"],
"onshore_base_no_weather_1_month_summer":
["sweetwater_weather_1998_2017_no_wind_wave.csv", "onshore_dudgeon_array.csv", "onshore_crawler_1_month_summer.yaml", "onshore_onsite.yaml", "onshore_cable_1_month_summer.yaml"],
"onshore_base_3_month_fall":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_3_month_fall.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_fall.yaml"],
"onshore_base_3_month_winter":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_3_month_winter.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_winter.yaml"],
"onshore_base_3_month_spring":
["sweetwater_weather_1998_2017.csv", "onshore_dudgeon_array.csv", "onshore_crawler_3_month_spring.yaml", "onshore_onsite.yaml", "onshore_cable_3_month_spring.yaml"],
}
# Fix the RNG seed so repeated sweeps draw identical random failures.
np.random.seed(0)
library_path = "/Users/rhammond/Documents/GitHub/wombat/library/"
# Header row for the per-scenario timing/size table run_scenario prints.
print(f"{'Simulation Name':>42} | {'Run Time':>9} | {'Data Time':>10} | {'Total Time':>11} | {'N Events':>10} | {'N Operations':>12}")
def run_scenario(simulation_name, weather, layout, crane, crew_transfer, cable):
    """Run one WOMBAT scenario and save its event and operations logs.

    Args:
        simulation_name: Key identifying the scenario; used in output
            file names and the printed progress table.
        weather: Weather profile CSV file name.
        layout: Windfarm layout CSV file name.
        crane, crew_transfer, cable: Equipment YAML file names; pass
            None to exclude that piece of servicing equipment.

    Prints one row of the timing/size table started in the cell above.
    """
    print(f"{simulation_name:>42}", end=" | ")
    start = perf_counter()

    #### THE SETUP ####
    env = WombatEnvironment(library_path, weather, simulation_name=simulation_name, workday_start=8, workday_end=18)
    manager = RepairManager(env)
    windfarm = Windfarm(env, layout, manager)
    # Equipment is optional: scenarios pass None to exclude a vessel.
    if crane is not None:
        crn = Equipment(env, windfarm, manager, crane)
    if crew_transfer is not None:
        ctv = Equipment(env, windfarm, manager, crew_transfer)
    if cable is not None:
        cab = Equipment(env, windfarm, manager, cable)

    #### RUN THE SCENARIO ####
    env.run()
    end1 = perf_counter()
    print(f"{(end1 - start) / 60:>9,.2f}", end=" | ")

    #### GATHER AND SAVE THE RESULTS ####
    events = env.create_events_log_dataframe()
    operations = env.create_operations_log_dataframe()
    events_save_path = os.path.join(library_path, "outputs", "csv_logs", f"{simulation_name}_events.csv")
    events.sort_values("env_time").to_csv(events_save_path, index_label="datetime")

    # Windfarm-level value = DOW_OSS (presumably the offshore substation
    # column -- TODO confirm) times the mean over all turbine columns.
    turbine_columns = [col for col in operations if col not in ("env_datetime", "env_time", "DOW_OSS")]
    column_order = ["env_datetime", "env_time", "windfarm", "DOW_OSS"] + turbine_columns
    operations["windfarm"] = operations["DOW_OSS"].values * np.mean(operations[turbine_columns].values, axis=1)
    operations = operations[column_order]
    operations_save_path = os.path.join(library_path, "outputs", "csv_logs", f"{simulation_name}_operations.csv")
    operations.sort_values("env_time").to_csv(operations_save_path, index_label="datetime")
    end2 = perf_counter()
    print(f"{(end2 - end1) / 60:>10,.2f} | {(end2 - start) / 60:>11,.2f} | {events.shape[0]:>10,.0f} | {operations.shape[0]:>12,.0f}")
# Run every scenario whose two output files are not both on disk yet,
# so an interrupted sweep can be resumed without recomputing finished runs.
for simulation_name, values in scenarios.items():
    events_save_path = os.path.join(library_path, "outputs", "csv_logs", f"{simulation_name}_events.csv")
    operations_save_path = os.path.join(library_path, "outputs", "csv_logs", f"{simulation_name}_operations.csv")
    if os.path.isfile(events_save_path) and os.path.isfile(operations_save_path):
        continue
    weather, layout, crane, crew_transfer, cable = values
    run_scenario(simulation_name, weather, layout, crane, crew_transfer, cable)
```
| github_jupyter |
## todo: 1. kpconv training; 2. inference; 3. inference result visualization; 4. documentation
## Create dataset
```
# add package path: make the repository root (the parent of the current
# working directory) importable so `rock_detection_3d` can be found.
import sys
import os

path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
if path not in sys.path:
    sys.path.insert(0, path)
# import packages
from omegaconf import OmegaConf
import pyvista as pv
import torch
import numpy as np
from tqdm.auto import tqdm
import time
import laspy
from rock_detection_3d.datasets.segmentation.rock_las import RockLASDataset
# Configure headless 3D rendering: point PyVista at a virtual X display
# and start Xvfb on :1 so plots work without a physical screen.
os.environ["DISPLAY"] = ":1.0"
os.environ["PYVISTA_OFF_SCREEN"]="true"
os.environ["PYVISTA_PLOT_THEME"]="true"
os.environ["PYVISTA_USE_PANEL"]="true"
os.environ["PYVISTA_AUTO_CLOSE"]="false"
os.system("Xvfb :1 -screen 0 1024x768x24 > /dev/null 2>&1 &")
# configure dataset params
DIR = "" # Replace with your root directory, the data will go in DIR/data.
USE_COLOR = True #@param {type:"boolean"}

# Dataset configuration as YAML. The notebook export had flattened the
# indentation inside this string, which made it invalid YAML; the
# nesting below restores the torch-points3d transform-list structure.
pbr_yaml = """
class: None # shapenet.ShapeNetDataset
task: segmentation
dataroot: %s
color: %r # Use color vectors as features
first_subsampling: 0.02 # Grid size of the input data
pre_transforms: # Offline transforms, done only once
    - transform: GridSampling3D
      params:
        size: ${first_subsampling}
train_transforms: # Data augmentation pipeline
    - transform: RandomNoise
      params:
        sigma: 0.01
        clip: 0.05
    - transform: RandomScaleAnisotropic
      params:
        scales: [0.9,1.1]
    - transform: AddOnes
    - transform: AddFeatsByKeys
      params:
        list_add_to_x: [True]
        feat_names: ["ones"]
        delete_feats: [True]
test_transforms:
    - transform: AddOnes
    - transform: AddFeatsByKeys
      params:
        list_add_to_x: [True]
        feat_names: ["ones"]
        delete_feats: [True]
""" % (os.path.join(DIR,"data"), USE_COLOR)

# Harmless duplicate of the earlier OmegaConf import, kept from the notebook.
from omegaconf import OmegaConf
params = OmegaConf.create(pbr_yaml)

# create dataset
dataset = RockLASDataset(params)
dataset
# visually inspect dataset: plot three training samples side by side,
# coloring each point by its per-point segmentation label.
#@title Plot samples with part annotations { run: "auto" }
objectid_1 = 3 #@param {type:"slider", min:0, max:100, step:1}
objectid_2 = 4 #@param {type:"slider", min:0, max:100, step:1}
objectid_3 = 5 #@param {type:"slider", min:0, max:100, step:1}
samples = [objectid_1,objectid_2,objectid_3]

p = pv.Plotter(notebook=True,shape=(1, len(samples)),window_size=[1024,412])
for i in range(len(samples)):
    p.subplot(0, i)
    sample = dataset.train_dataset[samples[i]]
    point_cloud = pv.PolyData(sample.pos.numpy())
    # 'y' holds the per-point labels used as the point scalars.
    point_cloud['y'] = sample.y.numpy()
    p.add_points(point_cloud, show_scalar_bar=False, point_size=4)
p.camera_position = [-1,5, -10]
p.show()
```
## Create segmentation model
```
# import packages
from torch_points3d.applications.kpconv import KPConv

# Number of input feature channels (RGB).
color = 3

class SegKPConv(torch.nn.Module):
    """KPConv U-Net wrapped with the hooks the torch-points3d dataset
    and tracker machinery expect (conv_type, get_output, get_labels...).

    Args:
        cat_to_seg: Category-to-segment mapping from the dataset
            (currently unused by the model itself).
    """

    def __init__(self, cat_to_seg):
        super().__init__()
        self.unet = KPConv(
            architecture="unet",
            input_nc=color,
            output_nc=2,  # isPBR & notPBR
            num_layers=4,
            # Grid size at the entry of the network; should be consistent
            # with the dataset's first subsampling resolution.
            in_grid_size=params['first_subsampling'],
        )

    @property
    def conv_type(self):
        """ This is needed by the dataset to infer which batch collate should be used"""
        return self.unet.conv_type

    def get_batch(self):
        return self.batch

    def get_output(self):
        """ This is needed by the tracker to get access to the outputs of the network"""
        return self.output

    def get_labels(self):
        """ Needed by the tracker in order to access ground truth labels"""
        return self.labels

    def get_current_losses(self):
        """ Entry point for the tracker to grab the loss """
        return {"loss_seg": float(self.loss_seg)}

    def forward(self, data):
        # Stash labels/batch so the tracker accessors above work.
        self.labels = data.y
        self.batch = data.batch

        # Forward through unet and classifier
        output_batch = self.unet(data)
        self.output = output_batch.x

        # Set loss for the backward pass
        self.loss_seg = torch.nn.functional.nll_loss(self.output, self.labels)
        return self.output

    def get_spatial_ops(self):
        return self.unet.get_spatial_ops()

    def backward(self):
        self.loss_seg.backward()

model = SegKPConv(dataset.class_to_segments)
model
```
## The data loaders and CPU pre computing features
KPConv is quite demanding on spatial operations such as grid sampling and radius search. On the network loaded here we have 10 KPConv layers on the encoder which means 10 radius search operations with varying numbers of neighbours. We observed a significant performance gain by moving those operations to the CPU where they can easily be optimised with suitable data structures such as kd-trees. We use [nanoflann](https://github.com/jlblancoc/nanoflann) in the back-end, a 3D-optimised kd-tree implementation. Note that this is beneficial only if you have access to multiple CPU threads.
You can decide to precompute those spatial operations by setting the `precompute_multi_scale` parameter to `True` when creating the data loaders. The dataset will mine the model to figure out which spatial operations are required and in which order.
```
# Small batch/worker counts for the Colab demo (larger values in comments).
NUM_WORKERS = 2 # 4
BATCH_SIZE = 2 # 16

dataset.create_dataloaders(
    model,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    shuffle=True,
    precompute_multi_scale=True  # mine the model for its spatial ops and precompute them on CPU
)

# Pull one batch to inspect its attributes.
sample = next(iter(dataset.train_dataloader))
sample.keys
```
Our `sample` contains the pre-computed spatial information in the `multiscale` (encoder side) and `upsample` (decoder) attributes. The decoder pre-computing is quite simple and just involves some basic caching for the nearest neighbour interpolation operation. Let's take a look at the encoder side of things first.
```
sample.multiscale
```
`sample.multiscale` contains 10 different versions of the input batch, each one of these versions contains the location of the points in `pos` as well as the neighbours of these points in the previous point cloud. We will first look at the points coming out of each downsampling layer (strided convolution), we have 5 of them.
```
#@title Successive downsampling {run:"auto"}
sample_in_batch = 0 #@param {type:"slider", min:0, max:5, step:1}

# Plot the point cloud coming out of each strided (downsampling) convolution
# layer; the even-indexed entries of sample.multiscale are plotted here.
ms_data = sample.multiscale
num_downsize = int(len(ms_data) / 2)
p = pv.Plotter(notebook=True,shape=(1, num_downsize),window_size=[1024,256])
for i in range(0,num_downsize):
    p.subplot(0, i)
    # Keep only the points of the chosen sample within the batch.
    pos = ms_data[2*i].pos[ms_data[2*i].batch == sample_in_batch].numpy()
    point_cloud = pv.PolyData(pos)
    point_cloud['y'] = pos[:,1]  # colour by height for readability
    p.add_points(point_cloud, show_scalar_bar=False, point_size=3)
    p.add_text("Layer {}".format(i+1),font_size=10)
    p.camera_position = [-1,5, -10]
p.show()
```
Let's now take one point in a layer (query point) and show its neighbours in the previous layer (support point)
```
#@title Explore Neighborhood {run: "auto"}
selected_layer = 7 #@param {type:"slider", min:1, max:9, step:1}
sample_in_batch = 0 #@param {type:"slider", min:0, max:5, step:1}
point1_id = 3 #@param {type:"slider", min:0, max:600, step:1}
point2_id = 8 #@param {type:"slider", min:0, max:600, step:1}

p = pv.Plotter(notebook=True,shape=(1, 2),window_size=[1024,412])
# Right panel: the selected layer, with the two chosen query points highlighted.
# Selected layer
p.subplot(0, 1)
ms_data = sample.multiscale[selected_layer]
pos = ms_data.pos[ms_data.batch == sample_in_batch].numpy()
nei = ms_data.idx_neighboors[ms_data.batch == sample_in_batch]  # neighbour indices into the previous layer
point_cloud = pv.PolyData(pos)
p.add_points(point_cloud, show_scalar_bar=False, point_size=3,opacity=0.3)
p.add_points(pos[point1_id,:], show_scalar_bar=False, point_size=7.0,color='red')
p.add_points(pos[point2_id,:], show_scalar_bar=False, point_size=7.0,color='green')
p.camera_position = [-1,5, -10]

# Left panel: the previous (support) layer, with each query point's
# neighbours drawn in the matching colour.
# Previous layer
p.subplot(0, 0)
ms_data = sample.multiscale[selected_layer-1]
pos = ms_data.pos[ms_data.batch == sample_in_batch].numpy()
point_cloud = pv.PolyData(pos)
p.add_points(point_cloud, show_scalar_bar=False,point_size=3, opacity=0.3)
# Indices < 0 are filtered out (presumably padding for points with fewer
# neighbours than the maximum — confirm).
nei_pos = ms_data.pos[nei[point1_id]].numpy()
nei_pos = nei_pos[nei[point1_id] >= 0]
p.add_points(nei_pos, show_scalar_bar=False, point_size=3.0,color='red')
nei_pos = ms_data.pos[nei[point2_id]].numpy()
nei_pos = nei_pos[nei[point2_id] >= 0]
p.add_points(nei_pos, show_scalar_bar=False, point_size=3.0,color='green')
p.camera_position = [-1,5, -10]
p.show()
```
## Train neural network
```
class Trainer:
    """Training / validation / test driver for the segmentation model.

    Owns the Adam optimizer and the dataset's metric tracker, checkpoints
    model + optimizer state every epoch, and can export test-set predictions
    to LAS files.
    """

    def __init__(self, model, dataset, num_epoch = 60, device=torch.device('cuda'), checkpoint_path="model/kpconv"):
        self.num_epoch = num_epoch
        self._model = model
        self._dataset = dataset
        self.device = device
        self.checkpoint_path = checkpoint_path
        # exist_ok avoids a race between the existence check and creation.
        os.makedirs(self.checkpoint_path, exist_ok=True)

    def save_model(self, epoch):
        """Write model/optimizer state for *epoch* to <checkpoint_path>/<epoch>.pt."""
        f = os.path.join(self.checkpoint_path, "{epoch}.pt".format(epoch=epoch))
        torch.save({
            'epoch': epoch,
            'model_state_dict': self._model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'loss': self._model.loss_seg,
        }, f)

    def load_model(self, epoch):
        """Load the weights checkpointed at *epoch* and return the model in eval mode."""
        f = os.path.join(self.checkpoint_path, "{epoch}.pt".format(epoch=epoch))
        assert os.path.isfile(f)
        checkpoint = torch.load(f)
        self._model.load_state_dict(checkpoint['model_state_dict'])
        self._model.to(self.device)
        self._model.eval()
        return self._model

    def resume_model(self, epoch):
        """Restore model + optimizer state for *epoch* so training/testing can resume."""
        f = os.path.join(self.checkpoint_path, "{epoch}.pt".format(epoch=epoch))
        assert os.path.isfile(f)
        self.optimizer = torch.optim.Adam(self._model.parameters(), lr=0.001)
        self.tracker = self._dataset.get_tracker(False, True)
        checkpoint = torch.load(f)
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self._model.load_state_dict(checkpoint['model_state_dict'])
        self._model.to(self.device)

    def fit(self):
        """Train for num_epoch epochs, validating and checkpointing after each one."""
        self.optimizer = torch.optim.Adam(self._model.parameters(), lr=0.001)
        self.tracker = self._dataset.get_tracker(False, True)
        for i in range(self.num_epoch):
            print("=========== EPOCH %i ===========" % i)
            time.sleep(0.5)  # let the previous progress bar flush before the next
            self.train_epoch()
            self.tracker.publish(i)
            self.valid_epoch()
            self.tracker.publish(i)
            self.save_model(i)

    def train_epoch(self):
        """Run one optimization pass over the training dataloader."""
        self._model.to(self.device)
        self._model.train()
        self.tracker.reset("train")
        train_loader = self._dataset.train_dataloader
        iter_data_time = time.time()
        with tqdm(train_loader) as tq_train_loader:
            for i, data in enumerate(tq_train_loader):
                t_data = time.time() - iter_data_time
                iter_start_time = time.time()
                self.optimizer.zero_grad()
                data.to(self.device)
                self._model.forward(data)
                self._model.backward()
                self.optimizer.step()
                # Tracking every iteration is expensive; sample every 10th.
                if i % 10 == 0:
                    self.tracker.track(self._model)
                tq_train_loader.set_postfix(
                    **self.tracker.get_metrics(),
                    data_loading=float(t_data),
                    iteration=float(time.time() - iter_start_time),
                )
                iter_data_time = time.time()

    def valid_epoch(self):
        """Evaluate the current model on the validation dataloader."""
        self._model.to(self.device)
        self._model.eval()
        self.tracker.reset("val")
        val_loader = self._dataset.val_dataloader
        iter_data_time = time.time()
        with tqdm(val_loader) as tq_val_loader:
            for i, data in enumerate(tq_val_loader):
                t_data = time.time() - iter_data_time
                iter_start_time = time.time()
                data.to(self.device)
                self._model.forward(data)
                self.tracker.track(self._model)
                tq_val_loader.set_postfix(
                    **self.tracker.get_metrics(),
                    data_loading=float(t_data),
                    iteration=float(time.time() - iter_start_time),
                )
                iter_data_time = time.time()

    def test(self, save=True):
        """Evaluate on the first test dataloader; optionally export LAS predictions.

        Requires fit() or resume_model() to have been called first (they
        initialise self.tracker).
        """
        self._model.to(self.device)
        self._model.eval()
        self.tracker.reset("test")
        test_loader = self._dataset.test_dataloaders[0]
        iter_data_time = time.time()
        with tqdm(test_loader) as tq_test_loader:
            for i, data in enumerate(tq_test_loader):
                t_data = time.time() - iter_data_time
                iter_start_time = time.time()
                data.to(self.device)
                self._model.forward(data)
                self.tracker.track(self._model)
                tq_test_loader.set_postfix(
                    **self.tracker.get_metrics(),
                    data_loading=float(t_data),
                    iteration=float(time.time() - iter_start_time),
                )
                if save:
                    self.save_las(data)
                iter_data_time = time.time()
        self.tracker.publish(self.num_epoch - 1)
        if save:
            print("saved prediction las files")

    def save_las(self, batch_data):
        """Split the batch back into individual samples and write each to LAS."""
        pred = self._model.get_output()
        outputs = torch.argmax(pred, 1)  # predicted class per point
        for i in torch.unique(batch_data.batch):
            idx = batch_data.batch==i
            pos = batch_data.pos[idx]
            x = batch_data.x[idx]
            y = outputs[idx]
            # Per-sample de-normalisation parameters; `center` is stored
            # flattened, three values per sample.
            center = batch_data.center[i*3:i*3+3]
            scale = batch_data.scale[i]
            file_name = batch_data.file_name[i]
            self.write_las(file_name, pos, x, y, center, scale)

    def write_las(self, file_name, pos, x, y, center, scale):
        """Undo normalisation and write one point cloud with prediction flags to LAS."""
        path = 'data/rocklas/prediction_3d'
        # BUG FIX: this previously created self.checkpoint_path instead of
        # the prediction output directory, so LAS export failed on a fresh run.
        os.makedirs(path, exist_ok=True)
        # Map positions back to world coordinates.
        pos = pos / scale
        pos = pos + center
        pos = pos.cpu().detach().numpy()
        # Colours are stored normalised in x; LAS expects 16-bit channels.
        color = (x * (2**16)).cpu().detach().numpy().astype(np.uint16)
        PBR_ids = (y==1).cpu().detach().numpy()
        notPBR_ids = (y==0).cpu().detach().numpy()
        # NaN marks "not this class" in the extra dimensions.
        # NOTE(review): isPBR is set to 0 for PBR points and notPBR to 1 for
        # non-PBR points — confirm these sentinel values match the reader.
        isPBR = np.empty(pos.shape[0])
        isPBR[:] = np.nan  # np.NaN alias was removed in NumPy 2.0
        notPBR = isPBR.copy()
        isPBR[PBR_ids] = 0
        notPBR[notPBR_ids] = 1
        f = os.path.join(path, "pred_{f}".format(f=file_name))
        header = laspy.LasHeader(point_format=2, version="1.2")
        header.scales = np.array([0.01, 0.01, 0.01])
        header.add_extra_dim(laspy.ExtraBytesParams(name="isPBR", type=np.float64))
        header.add_extra_dim(laspy.ExtraBytesParams(name="notPBR", type=np.float64))
        las = laspy.LasData(header)
        las.x = pos[:, 0]
        las.y = pos[:, 1]
        las.z = pos[:, 2]
        las.red = color[:, 0]
        las.green = color[:, 1]
        las.blue = color[:, 2]
        las.isPBR = isPBR
        las.notPBR = notPBR
        las.write(f)
# Train the model with the default 60-epoch schedule.
trainer = Trainer(model, dataset)
trainer.fit()
# Monitor training curves in TensorBoard.
%load_ext tensorboard
%tensorboard --logdir tensorboard/ # Change for your log location
```
## Inference
```
#trainer.resume_model(49)
# Run inference on the test set and write the prediction LAS files.
trainer.test()

from rock_detection_3d.utils.las_reader import Read_Las_from_Path, Read_Las_from_Json

# Readers for the predicted clouds and for the ground-truth test split.
pred_las_reader = Read_Las_from_Path('data/rocklas/prediction_3d')
print(len(pred_las_reader))
las_reader = Read_Las_from_Json('data/rocklas/raw/test_split.json')
print(len(las_reader))

# Side-by-side comparison for one scan: prediction (left) vs ground truth (right).
idx = 6
p = pv.Plotter(notebook=True,shape=(1, 2),window_size=[1024,412])
p.subplot(0, 0)
pos, color, y = pred_las_reader.get_normalized(idx)
point_cloud = pv.PolyData(pos)
point_cloud['y'] = y
p.add_points(point_cloud, show_scalar_bar=False, point_size=4)
p.camera_position = [-1,5, -10]
p.subplot(0, 1)
pos, color, y = las_reader.get_normalized(idx)
point_cloud = pv.PolyData(pos)
point_cloud['y'] = y
p.add_points(point_cloud, show_scalar_bar=False, point_size=4)
p.camera_position = [-1,5, -10]
p.show()
```
| github_jupyter |
<center><h1><strong>taudata Analytics</strong></h1></center>
<center><h2><strong><font color="blue">Exploratory Data Analysis-02-C: Visualisasi Data Teks </font></strong></h2></center>
<img alt="" src="images/covers/taudata-cover.jpg"/>
<b><center>(C) Taufik Sutanto</center>
<center><h3><font color="blue">https://taudataid.github.io/eda-02C-TA/</font></h3></center>
# <center><font color="blue">Outline Visualisasi Teks Media Sosial</font></center>
## <font color="green">Text Visualization using Voyant Tools</font>
* Word Clouds
* Word Links
* Word Context for Interpretation
* Word Tree
* Word Trends
><font color="red">"*I always have a basic plot outline, but I like to leave some things to be decided while I write*." ~ J. K. Rowling</font>
```
import warnings; warnings.simplefilter('ignore')
import nltk
try:
    # Detect Google Colab: if this import succeeds we are on Colab and need
    # to fetch the course helper module, data files and extra dependencies.
    import google.colab
    IN_COLAB = True
    !wget https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/taudataEDA02CTA.py
    !mkdir data
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/slang.dic
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/slang.txt
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_id.txt
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_en.txt
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/kata_dasar.txt
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/wn-ind-def.tab
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/wn-msa-all.tab
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/ind_SA.csv
    !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/all_indo_man_tag_corpus_model.crf.tagger
    !pip install --upgrade spacy python-crfsuite unidecode textblob sastrawi sklearn-pycrfsuite
    !pip install --upgrade unidecode twython tweepy beautifulsoup4
    !python -m spacy download xx_ent_wiki_sm
    !python -m spacy download en_core_web_sm
    nltk.download('popular')
except:
    # NOTE(review): this bare except also hides download failures on Colab —
    # confirm that only the ImportError path is intended.
    IN_COLAB = False
    print("Running the code locally, please make sure all the python module versions agree with colab environment and all data/assets downloaded")
import taudataEDA02CTA as tau
import json, urllib.request, requests, tweepy
from urllib.request import Request, urlopen
from tqdm import tqdm
"Done"
# Example API keys (replace with your own credentials).
# SECURITY NOTE: never commit real Twitter credentials to a shared notebook.
Ck = 'QCgrPpBwTo8OJeV' # consumer_key
Cs = '3FRZuNT4KgWVIAaqIISgeMweP6zOIT' # consumer_secret
At = '2214118411-3QqM3CkNfWFDn7' # access_token
As = 'THDDQsNeeOT1DzfSM6MH' # access_secret
'Done'
```
# Trending Topic Saat ini?
* https://twitter-trends.iamrohit.in/indonesia
* https://trends24.in/indonesia/
### Jangan Lupa
<img alt="" src="images/query_Operator.png" style="width: 661px; height: 554px;" />
```
# Fetch tweets for the query, or load a previously saved local copy.
qry = "singapura somad OR singapura uas"
tweetFile = 'D:/Desktop/tweet_{}_EDA02CTA.json'.format(qry)
try:
    # Prefer the cached JSON dump if it exists.
    data = tau.loadTweets(file=tweetFile)
    print("Local Data loaded, processing {} tweets.".format(len(data)))
except Exception as Err_:
    print("Local data not available, importing data directly from twitter ... ")
    api = tau.connect(key=(Ck, Cs, At, As))
    data = tau.crawlTwitter(api, qry, N=5000)
    tau.saveTweets(data, file=tweetFile)  # save for next time
tweets = [t['full_text'] for t in data]  # keep only the tweet text for this module
```
# Preprocessing text Menggunakan fungsi dari taudata
```
# Load Indonesian stopwords, the lemmatizer, and the slang-normalisation lexicon.
stopId, lemmaId = tau.LoadStopWords(lang='id')
slangFixId = tau.loadCorpus(file = 'data/slang.txt', sep=':')

# Clean every tweet: lemmatise, drop stopwords, and normalise slang.
cleaned_data = []
for tweet in tqdm(tweets):
    cleaned_data.append(tau.cleanText(tweet, lemma=lemmaId, lan='id', stops = stopId, fix=slangFixId))
cleaned_data[:7]  # quick visual check of the first few cleaned tweets
```
# Menyimpan Hasil Preprocessing untuk di Visualisasi
### Kita simpan juga data yang tidak di preprocessing untuk module D (SNA)
```
# Save the cleaned tweets to a plain-text file, to be uploaded to Voyant Tools.
filename = tweetFile.replace(".json", ".txt")
tau.saveTweets(cleaned_data, file=filename, type_="plain")
# BUG FIX: the format string was missing its {} placeholder, so the file name
# was never interpolated into the status message.
"Tweets Saved: {}".format(filename)  # check the "data" folder; move the file to the Desktop to ease the next step
from unidecode import unidecode
# Also save the raw (un-preprocessed) tweets, transliterated to ASCII, to keep
# better context for the SNA module.
tweets = [unidecode(t) for t in tweets]
tau.saveTweets(tweets, file=filename.replace(".txt", "_tweets.txt"), type_="plain")
```
<h2>Text Analytics</h2>
<ul>
<li>Tidak seperti data terstruktur, data tidak terstruktur seperti teks termasuk salah satu data yang cukup sulit untuk divisualisasikan.<br />
<img alt="" src="images/11_charts.jpg" style="height:150px; width:276px" /></li>
<li>Namun terdapat Tools seperti Voyant yang dapat membantu dalam visualisasi sekaligus analisis.<br />
<img alt="" src="images/11_voyant.png" style="height:118px; width:426px" /></li>
</ul>
<h3 id="Voyant-dapat-digunakan-dalam-2-cara:">Voyant dapat digunakan dalam 2 cara:</h3>
<ol>
<li>
<p><strong>Online</strong>: <a href="https://voyant-tools.org/" target="_blank">https://voyant-tools.org/</a><br />
<u>Kelebihan</u>: Sederhana & portable, tanpa harus install di komputer kita.<br />
<u>Kekurangan</u>: butuh koneksi internet, tidak cocok untuk data teks yang besar, privacy.</p>
</li>
<li>
<p><strong>Offline </strong>di komputer kita [Java Based]</p>
</li>
</ol>
[2]. Jalankan Voyant secara offline atau online di URL <a href="https://voyant-tools.org/" target="_blank">https://voyant-tools.org/</a>
[3]. Upload file yang baru saja kita simpan.
<h3 id="Penggunaan-Voyant-1:-WordClouds">Penggunaan Voyant 1: WordClouds</h3>
<ol>
<li>Upload teks yang akan di analisa: hasil cluster/ suatu kategori/ topics / raw text.</li>
<li>slider terms: mengkontrol banyaknya terms yang disertakan.</li>
<li><strong>Summary </strong>(statistics)</li>
<li><strong>Documents </strong>==> add more</li>
<li><strong>Phrases </strong>(n-grams like)</li>
<li><strong>Export </strong>Visualisasi (kanan atas - pertama)</li>
<li><strong>Options </strong>(kanan atas ke-3): Font, size, stopwords, whitelist</li>
<li>"?" ==> More Help</li>
</ol>
<p> </p>
<h3>Penggunaan Voyant 2: Word Links</h3>
<ol>
<li>Upload teks yang akan di analisa: hasil cluster/ suatu kategori/ topics / raw text.<br />
Atau file yang sudah terupload sebelumnya</li>
<li>Visualization Tools ==> Links</li>
<li>Klik sembarang terms untuk expand</li>
</ol>
# Best practice dan catatan mengenai Visualisasi Wordclouds (WC):
* Gunakan Kata Benda saja (misal pada kasus Topic Modelling).
* Atau Gunakan hanya Kata Sifat (pada kasus Sentiment Analysis).
* WC dapat digunakan untuk memeriksa apakah preprocessing data teks kita sudah cukup baik.
* Python juga memiliki module "WordClouds" yang dapat membuat WC yang lebih variatif.
*
<h3 id="Penggunaan-Voyant-3:-Word-Tree">Penggunaan Voyant 3: Word Tree</h3>
<ol>
<li>Upload teks yang akan di analisa: hasil cluster/ suatu kategori/ topics / raw text.<br />
Atau file yang sudah terupload sebelumnya</li>
<li>Klik branch untuk expand</li>
</ol>
<h3>Penggunaan Voyant 4: Trends</h3>
<ol>
<li>Upload teks yang akan di analisa: hasil cluster/ suatu kategori/ topics / raw text.<br />
Atau file yang sudah terupload sebelumnya</li>
<li>Document Tools ==> Trends</li>
<li>.. Butuh preprocessing ... </li>
<li>Data harus terurut waktu</li>
<li>Berikut contohnya</li>
</ol>
<p><strong>Latihan : </strong></p>
<ol>
<li>Crawl twitter dengan salah satu topik yang sedang trending saat ini di Indonesia. </li>
<li>Lakukan Preprocessing data</li>
<li>Visualisasikan</li>
<li>Diskusikan insight/informasi yang didapat</li>
</ol>
<h1>End of Module C</h1>
<hr />
<img alt="" src="images/meme-cartoon/2_Linguistic_Joke.jpg" style="height:400px; width:608px" />
| github_jupyter |
```
#Code to run TICA docking for protein-ligand md on backbone positions
import pyemma
import glob
import numpy as np
import pyemma.plots as mplt
%pylab inline
import mdtraj as md
import pyemma.coordinates as coor
def average_by_state(dtraj, x, nstates):
    """Average the observable *x* within each discrete state.

    Parameters
    ----------
    dtraj : ndarray of int
        Discrete trajectory: the state index assigned to each frame.
    x : ndarray
        Observable value per frame; must have the same length as dtraj.
    nstates : int
        Number of discrete states.

    Returns
    -------
    ndarray of shape (nstates,)
        Mean of x over the frames assigned to each state.
    """
    assert len(dtraj) == len(x)
    state_means = np.zeros(nstates)
    for state in range(nstates):
        frame_ids = np.argwhere(dtraj == state)[:, 0]
        state_means[state] = np.mean(x[frame_ids])
    return state_means
def avg_by_set(x, sets):
    """Mean of *x* over each index set in *sets*.

    This matters because the set order in a coarse-grained TPT object can
    differ from the input order, so positions must be averaged per set.

    Parameters
    ----------
    x : ndarray
        Values indexed by state.
    sets : sequence of index collections
        Each element is a collection of indices into x.

    Returns
    -------
    ndarray of shape (len(sets),) with the per-set means.
    """
    return np.array([np.mean(x[list(index_set)]) for index_set in sets])
# Paths of the five joined holo MD trajectories (runs 1..5).
trajfile = [
    '/net/jam-amaro-shared/bccgc4/CatS_holo_md/joined_md/joined_holo_md_' + str(run) + '.nc'
    for run in range(1, 6)
]
print(trajfile)
# Assign the features: the (x, y, z) positions of the selected backbone atoms.
feat = coor.featurizer('/net/jam-amaro-shared/bccgc4/CatS_holo_md/joined_md/holo_top.h5')
feat.add_selection([1024, 1027, 306, 1029, 1031, 1033, 2851, 1035, 1036, 1037, 1038, 1039, 1034, 1040, 1042, 1043, 1044, 1045, 1041, 2854, 1049, 2076, 311, 1060, 1061, 1062,
1072, 1074, 1076, 1087, 2112, 2116, 2120, 2122, 2123, 2126, 2135, 323, 2138, 324, 325, 1133, 1134, 1135, 1136, 1137, 2170, 330, 3206, 3207, 3208, 3210,
334, 3219, 336, 2207, 2209, 2210, 2211, 2213, 2214, 2215, 2217, 2219, 2220, 2221, 3244, 1715, 1716, 1743, 1745, 1747, 1749, 2897, 2837, 328, 329, 2899, 339, 342, 343, 350, 2911, 354, 359, 361, 362, 363, 364, 365, 366, 367, 379, 385, 386, 395, 398, 2449,
2451, 2452, 2453, 2454, 2455, 2456, 2460, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2477, 2478, 941, 2480, 2481, 946, 2834, 944, 2486,
2487, 2488, 2835, 2492, 2836, 2494, 963, 2838, 2506, 2507, 2508, 2839, 2510, 972, 978, 979, 980, 2841, 2842, 2844, 997, 2845, 1003, 2846, 1008, 1007, 1010,
1009, 2847, 1012, 1013, 1011, 1016, 1017, 1018, 1022])

# Define the trajectory source lazily (streams frames, nothing loaded into memory).
inp = coor.source(trajfile, feat)
print(inp)
print('trajectory length = ',inp.trajectory_length(0))
print('number of dimension = ',inp.dimension())

# TICA projection; keep enough independent components for 95% kinetic variance.
lag = 1000  # lag time in frames
tica_obj = coor.tica(inp, lag = lag, var_cutoff = 0.95, kinetic_map = False)
# Y is a list with one projected array per input trajectory.
Y = tica_obj.get_output()
print('Mean values: ', np.mean(Y, axis=0))
print('Variances: ', np.var(Y, axis=0))
#print(-lag/np.log(tica_obj.eigenvalues[:5]))

# Cluster the TICA-projected data with k-means (10 clusters).
cl = coor.cluster_kmeans(data = Y,k = 10, max_iter = 5000)
# For later use we save the discrete trajectories (dtrajs) and the first two
# coordinates of the cluster centres.
dtrajs = cl.dtrajs
cc_x = cl.clustercenters[:,0]
cc_y = cl.clustercenters[:,1]
print(dtrajs)
print(np.size(dtrajs))
cl.converged  # check whether k-means converged within max_iter
# Free-energy surfaces for every pair of the first six TICs, with the k-means
# cluster centres overlaid (one colour per cluster). One figure per row-TIC s.
for s in range(6):
    fig, ax = plt.subplots(1, 6, sharex='col', sharey='row', figsize = (20,3.4)) #creating 1x6 subplot grid
    for w in range(6):
        mplt.plot_free_energy(np.vstack(Y)[:,s], np.vstack(Y)[:,w], ax = ax[w], cmap = 'viridis')#, cbar = False, cbar_label = None)
        cc_x = cl.clustercenters[:,s]
        cc_y = cl.clustercenters[:,w]
        #ax[w].plot(cc_x,cc_y, linewidth=0, marker='o', markersize=5, color='red')
        colors = ['black','gray','red','saddlebrown','darkorange','gold','darkgreen','aqua','darkviolet','deeppink']
        # One scatter call per cluster so each centre keeps its own colour.
        for i in range(10):
            ax[w].scatter(cc_x[i], cc_y[i], color = colors[i])
    for a in range(6):
        ax[a].set(xlabel = ('TIC '+str(a)))
    fig.text(0.001, 0.5, 'TIC '+str(s), va = 'center', rotation='vertical')
    fig.suptitle('TICA Cluster Centroids',fontsize = 16, y=1.06)
    fig.tight_layout()
    plt.savefig('/home/jegan/Clustering_methods/TICA/figs/CBA_holo/TICA_FE_TIC'+str(s)+'.png', bbox_inches = 'tight')
# How strongly each input feature correlates with each TIC.
tica_obj.feature_TIC_correlation
# Cumulative kinetic variance per TIC (the 0.95 cutoff decides how many are kept).
tica_obj.cumvar

# Count the frames assigned to each of the 10 clusters and write the
# per-cluster percentage of frames to a text file.
print(dtrajs)
print(dtrajs[0])
with open('/home/jegan/Clustering_methods/TICA/clusters_CBA_holo.txt', 'w') as newfile:
    numb = []
    for i in range(10):
        frames = []
        for k in dtrajs:
            for p in k:
                if p == i:
                    frames.append(p)
        print(len(frames))
        numb.append(len(frames))
    newfile.write('Percent of frames per TICA CBA holo cluster:\n')
    tot = 0
    index = 0
    for j in numb:
        tot += j
        # NOTE(review): 450000 is a hard-coded total frame count — confirm it
        # equals the actual total (printed below as `tot`).
        percent = (j/450000)*100
        num = str(percent)
        newfile.write('Cluster '+ str(index)+' = '+num+' %\n')
        index += 1
print(tot)
# Extracting centroids: for every cluster, find the frame whose TICA
# projection is closest (Euclidean) to the cluster centre.
avg = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100]  # best distance so far per cluster (100 = "not found yet")
indices = {}  # cluster id -> [trajectory index, frame index] of the closest frame
for i in range(len(Y)):
    #for k in range(len(Y[0][i])):
    for k in range(len(Y[i])):
        c = cl.clustercenters[cl.dtrajs[i][k]]
        v = Y[i][k]
        newavg = np.linalg.norm(c-v)
        if avg[cl.dtrajs[i][k]] > newavg:
            avg[cl.dtrajs[i][k]] = newavg
            indices[cl.dtrajs[i][k]] = [i, k]

# Load each centroid frame, align it onto the crystal structure, save a PDB.
pdb0 = md.load('/home/jegan/final_centroids/XTAL/XTAL_0.pdb')
z = 0
for key in indices:
    z = z + 1
    # NOTE(review): this maps (trajectory i, frame k) to an index in the
    # concatenated trajectory as k*(1+i) — verify this is the intended
    # offset (an additive i*traj_length scheme would be more usual).
    index = indices[key][1]*(1 + indices[key][0])
    print(index)
    cen = md.load_frame('/net/jam-amaro-shared/bccgc4/CatS_holo_md/joined_md/entire_holo_md.nc', index, top = '/net/jam-amaro-shared/bccgc4/CatS_holo_md/joined_md/holo_top.h5')
    pdb = cen.superpose(pdb0,0)
    pdb.save_pdb('/home/jegan/Clustering_methods/TICA/TICA_CBA_holo_centroids/TICA_%s.pdb' % (z-1))
# Save the first frame of the ligand trajectory as the crystal (XTAL)
# reference, aligned onto pdb0 using residues 0-86 of the protein.
trajectory = md.load('/net/jam-amaro-shared/bccgc4/CatS_lig_md/md/joined_prot_traj.nc',
top = '/net/jam-amaro-shared/bccgc4/CatS_lig_md/md/joined_prot_top.h5')
pdb0 = md.load('/home/jegan/final_centroids/XTAL/XTAL_0.pdb')
i = trajectory[0].topology.select('protein')  # atom indices of the protein only
new = trajectory[0].atom_slice(i)
select = new.topology.select('resid 0 to 86')  # atoms used for the alignment
pdb = new.superpose(pdb0,0,select)
pdb.save_pdb('/home/jegan/final_centroids/prot-lig/XTAL.pdb')
```
| github_jupyter |
# Sample analysis of risk factors
This is an example of quantitative modeling of risk factors using Jupyter notebooks with seaborn for visualization and PyMC3 for Bayesian statistical modeling. Click on the "Open in Colab" badge to get your own copy of the notebook that you can edit and experiment with without needing to install anything locally!
Written by [Will Maier](https://www.linkedin.com/in/whilp/). <a href="https://colab.research.google.com/github/kindlyops/fair-examples/blob/master/jupyter/Example_risk_analysis_notebook_scenario_a.ipynb"><img align="right" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
```
import scipy.stats as stats
import pymc3 as mc
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Fix the RNG seed so the notebook's sampling results are reproducible.
seed = 14211
np.random.seed(seed)
```
# Model
We will develop a hierarchical model composed of many factors, defining each factor iteratively. In some cases, the factors will depend on other factors, so we'll work our way up from the most granular factors to risk itself. This approach follows the [FAIR](https://www.fairinstitute.org/) ontology (Open Group standard [O-RT](https://publications.opengroup.org/standards/security/c13k)).
We will express the model using [PyMC3](https://docs.pymc.io/), which provides a concise language to describe probabilistic models. Once defined, PyMC3 can also draw samples from the modelled distribution through Monte Carlo simulation.
In the end, we will be able to analyze collections of these samples as traces from which we can estimate characteristics of the distribution including its mean and quantiles.
To begin, we define model as a context manager. As we develop the model, we will do so in this context
```
# Container for all random variables defined below (used as a context manager).
model = mc.Model()
```
## Elicitation
Risk models relate various factors, where each factor is itself modeled by a probability distribution. These distributions are defined using parameters that are hard to relate to the underlying concrete phenomena. For example, an expert rarely thinks about a random process like the magnitude of a loss event in terms of its mean or variance. Instead, it is often easier to estimate specific plausible values from the distribution. These can be fit to a distribution and the distribution can then be used to estimate the parameters directly.
This process of estimating distribution parameters is called elicitation. Here, we simply fit a known distribution to whatever plausible data an expert might provide. [SHELF](http://www.tonyohagan.co.uk/shelf/) offers a more robust method for facilitated elicitation among a group of experts.
The `elicit` function returns a variable following the beta distribution with its mu (mean) and sigma (standard deviation) parameters inferred from an array of estimated values in `data`. These values may be drawn from actual data or -- where such data is not available -- estimated by experts. Providing more values should usually result in an elicited distribution that more closely matches the experts' intent.
This is most useful for continuous variables with upper and lower bounds, where the bounds are the first and last items in `data`. The beta distribution has these characteristics and so is used here. `elicit` could be modified to support other continuous distributions as well.
The return value follows the beta distribution deterministically scaled to the minimum and maximum values from `data`. The [scipy.stats.rv_continuous](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.html#scipy.stats.rv_continuous) variable used for the fit is stored as the `var` attribute of the return value. This may be used to retrieve characteristics of `data`, including its mean or interval.
```
def elicit(name, data):
    """Create a scaled-Beta PyMC3 variable fitted to expert-supplied values.

    After sorting, the smallest and largest entries of *data* become the
    hard lower/upper bounds of the distribution; the interior values are
    fitted to a beta distribution on that interval.

    Parameters
    ----------
    name : str
        Name of the resulting PyMC3 variable.
    data : array-like
        Plausible values; data's min and max are the bounds.

    Returns
    -------
    A pymc3 Deterministic supported on [min(data), max(data)], with the
    fitted scipy.stats frozen beta distribution attached as its ``var``
    attribute (useful for retrieving the mean or intervals of the data).
    """
    y = np.sort(data)
    lower = y.min()
    width = y.max() - lower
    # BUG FIX: scipy's loc/scale parameterisation gives support
    # [loc, loc + scale], so the fixed scale must be the interval *width*
    # (max - min), not the maximum. The two coincide only when min == 0,
    # which happened to hold for the examples in this notebook.
    par = stats.beta.fit(y[1:-1], floc=lower, fscale=width)
    var = stats.beta(*par)
    # Express the fitted mean/sd on the unit interval; subtracting the lower
    # bound first makes this correct for data whose minimum is nonzero.
    scaled_mu = (var.mean() - lower) / width
    scaled_sd = var.std() / width
    scaled = mc.Beta(f"{name}_scaled__", mu=scaled_mu, sd=scaled_sd)
    # Deterministically rescale the unit-interval Beta back to [min, max].
    dist = mc.Deterministic(name, lower + (scaled * width))
    dist.var = var
    return dist
```
For convenience, we also define a function to quickly summarize a single variable. The resulting plot includes a histogram, KDE, and indicators for the 50th, 90th, 95th, and 99th percentile values.
```
def sumplot(data):
    """Summarize a sample: histogram/KDE plus dotted vertical markers at the
    mean and at the 50th, 90th, 95th and 99th percentiles."""
    sns.distplot(data)
    palette = iter(sns.color_palette())
    mean_value = np.mean(data)
    plt.axvline(mean_value, linestyle=":", color=next(palette), label=f"mean = {mean_value:0.2f}")
    for p in (50, 90, 95, 99):
        percentile_value = np.percentile(data, p)
        plt.axvline(percentile_value, linestyle=":", color=next(palette), label=f"{p:0.2f} = {percentile_value:0,.2f}")
    plt.legend()
```
## Primary loss
```
# Elicit the primary-loss factor from plausible values (first/last are the
# bounds) and preview the fitted distribution outside the model context.
with model:
    pl = elicit("primary_loss", [0, 100, 1005, 1100, 10000])
sumplot(pl.var.rvs(1000))
```
## Secondary loss
```
# Elicit the secondary-loss factor.
# NOTE(review): the values are not in ascending order (50000 before 10000);
# elicit() sorts them, so the upper bound becomes 50000 — confirm intended.
with model:
    sl = elicit("secondary_loss", [0, 10, 105, 50000, 10000])
sumplot(sl.var.rvs(1000))
```
## Risk
```
# Total risk is the (deterministic) sum of primary and secondary losses,
# following the FAIR ontology.
with model:
    risk = mc.Deterministic("risk", pl + sl)
```
## Simulation
```
# Draw posterior samples from the model via Monte Carlo simulation.
with model:
    trace = mc.sample(seed=seed)
```
## Analysis
```
# Flatten the trace into a DataFrame and summarize the risk distribution.
df = mc.trace_to_dataframe(trace)
sumplot(df.risk)
```
| github_jupyter |
# langages de script – Python
## Cours 2 – structures de données
### M1 Ingénierie Multilingue – INaLCO
clement.plancq@ens.fr
# Avant de commencer
Les structures de données sont un point essentiel de tout langage de programmation
Les diapos qui suivent n'offrent qu'une couverture partielle du sujet
Vous *devez* lire attentivement et vous référer à la documentation officielle de Python :
- [le chapitre 5 du tutoriel](https://docs.python.org/3.6/tutorial/datastructures.html)
- la [doc](https://docs.python.org/3.6/library/stdtypes.html) sur les types built-in
- la [doc](https://docs.python.org/3/library/collections.html) du module collections
# Les listes
- Les listes sont des *sequences* (`str`, `tuple`, `list`)
- Les *sequences* sont des structures de données indicées qui peuvent contenir des éléments de différents types
- Les *sequences* sont des *iterables*, les listes aussi donc
- Les éléments d'une liste peuvent être modifiés (*mutable*)
- Une liste vide peut se déclarer de deux façons
```
stack = list()
stack = []
stack = ["le", "guépard", "le", "poursuit"]
stack = list('Perl')
stack
```
# Les listes : fonctions
- Les listes héritent des fonctions des *sequences*, elles ont également des [fonctions propres](https://docs.python.org/3.6/tutorial/datastructures.html#more-on-lists)
- Parmi ces fonctions nous utiliserons principalement :
- `append(x)` : ajoute un élément x à la fin de la liste (haut de la pile*)
- `extend([x, y, z])` : ajoute tous les éléments de la liste arg à la fin de la liste
- `pop([index])` : supprime et renvoie le dernier élément de la liste ou les index de la liste argument si arg
- `index(x)` : renvoie l'index du premier élément de valeur x
- `count(x)` : renvoie le nombre de fois où x apparaît
- `sort()` : trie et modifie la liste, lire la [doc](https://docs.python.org/3.6/howto/sorting.html#sortinghowto) pour en savoir plus sur les ordres de tri
```
stack = [12, 15, 12, 7, 18]
stack.index(12)
stack.count(12)
stack.sort()
stack
stack.append(23)
stack
stack.append([35, 46])
stack
stack.extend([51, 52])
stack
```
# Les listes en compréhension
- Elles permettent de définir des listes par filtrage ou opération sur les éléments d'une autre liste
- Elles viennent de la programmation fonctionnelle
- La [PEP 202](http://www.python.org/dev/peps/pep-0202/) conseille de préférer les listes en compréhension aux fonctions `map()` et `filter()`
- Pas évident-évident de prime abord mais très expressif, élégant et *so pythonic*. Un dév python **doit** maîtriser les listes en compréhension
```
[i ** 2 for i in range(10)]
[i ** 2 for i in range(10) if i % 2 == 0]
[(i, j) for i in range(2) for j in ['a', 'b']]
```
# Attention !
## Copie de liste
Dans `y = x`, `y` n'est pas un copie de x, les deux pointent vers le même objet
```
x = [1, 2, 3]
y = x
y[0] = 4
x
```
Pour copier une liste il faut utiliser :
```
x = [1, 2, 3]
y = x[:]
# ou
y = list(x)
```
# Les tuples
- Les tuples (`tuple`) sont des *sequences* similaires aux listes sauf qu'elles ne peuvent pas être modifiées (*immutable*)
- Les tuples sont souvent utilisées comme valeur de retour d'une fonction
- Les tuples peuvent être utilisées comme clé de dictionnaire
```
voyelles = ('a', 'e', 'i', 'o', 'u', 'y')
var = tuple('Perl')
var
```
# Déballage de séquences
- Le *sequence unpacking* permet d'effectuer plusieurs affectations simultanées
- L'*unpacking* s'applique souvent sur des tuples
```
x, y, z = (1, 2, 3)
y
lexique = [("maison", "mEz§"), ("serpent", "sERp@")]
for ortho, phon in lexique:
print(phon)
```
# Attention !
## Tuple à 1 élément
Pour créer un tuple à un élément il faut utiliser la notation `(elem, )`
```
var = (1)
type(var)
var = (1, )
type(var)
```
# Parcours de liste
La boucle `for` est particulièrement adaptée pour parcourir les *iterables* et donc les listes
```
for item in voyelles:
print(item)
```
La fonction `enumerate` peut être utile dans certains cas, elle renvoie un `tuple` contenant l'indice et la valeur de l'item à l'indice concerné
```
for i, item in enumerate(voyelles):
print(i, item)
```
# Les ensembles
Les ensembles (`set`) sont des collections non ordonnées d'élements sans doublons
Les ensembles supportent les fonctions mathématiques d'union, d'intersection, de différence ([doc](https://docs.python.org/3.6/library/stdtypes.html#set))
```
ens1 = {'le', 'guépard', 'le', 'poursuit'}
ens1
ens2 = {"avec", "le", "chandelier", "dans", "la", "cuisine"}
ens1.intersection(ens2)
```
# Les dictionnaires
- Les dictionnaires (`dict`) sont des structures de données associatives de type clé: valeur
- Les clés d'un dictionnaire sont uniques, seuls les types *immutable* peuvent être des clés ([doc](https://docs.python.org/3.6/library/stdtypes.html#mapping-types-dict))
- `key in d` retourne True si `key` est une clé de `d`
- `keys()` retourne la liste des clés
- `values()` retourne la liste des valeurs
- `items()` retourne la liste des couples clé:valeur (tuple)
- `setdefault(key, default)` retourne la valeur associée à la clé. Si la clé n'existe pas, ajoute la clé associée à default
```
d = {'Perl':'Larry Wall', 'Python':'Guido Van Rossum', 'C++':'Bjarne Stroustrup'}
d['Perl']
d['Ruby']
d.get('Ruby')
```
# Module collections
- Le module *collections* propose des implémentations de structures de données supplémentaires
- Dans la liste (voir [doc](https://docs.python.org/3.6/library/collections.html)), deux pourront nous intéresser :
- `defaultdict`
`defaultdict` est similaire à un `dict` mais il permet l'autovivification
Son implémentation le rend plus rapide qu'un dictionnaire utilisé avec la fonction `setdefault`
```
# Group pronunciations by orthographic form using defaultdict's
# autovivification: a missing key automatically gets a fresh empty list
# on first access, so no existence check is needed.
import collections

lexique = [("couvent", "kuv"), ("couvent", "kuv@")]
dico = collections.defaultdict(list)
for entry in lexique:
    # entry is an (orthography, pronunciation) pair
    dico[entry[0]].append(entry[1])
dico
```
# Module collections
- Counter
`Counter` est un dictionnaire où les valeurs attendues sont les nombres d'occurrences des clés
```
# Count word occurrences with collections.Counter.
from collections import Counter

cnt = Counter()
# Fix: the original named this variable `list`, shadowing the builtin
# `list` type for the rest of the session — renamed to `words`.
words = ['le', 'guépard', 'le', 'poursuit']
for item in words:
    cnt[item] += 1
cnt
```
# Help !
## Fonction `repr`
Dans la console Python les structures de données s'affichent de façon lisible
`print(obj)` ne donnera pas toujours le résultat escompté
La fonction `repr(obj)` est à préférer, elle donne une "représentation textuelle" d'un objet
#### Exos
Vous rendrez des scripts Python3. Avec des commentaires c'est mieux.
1. À partir du fichier tsv `sem_Ef9POe.conll`
1. pour chaque POS listez les types classés par ordre d'occurrence décroissante,
2. pour chaque type de chunk indiquez les longueurs min et max (en nb de mots).
2. Résoudre [Mime types](https://www.codingame.com/training/easy/mime-type) sur CodinGame
| github_jupyter |
# Inventory Allocation
This notebook provides two prototypes of solvers for the inventory allocation problem. We use the problem statements that are essentially equivalent to the classic linear transportation problem.
The first prototype focuses on the basic formulation where a single product need to be allocated across $n$ warehouses to serve $m$ markets.
The second prototype focuses on an extended formulation of the problem where some product combinations are frequently purchased together. In this formulation, we need to ensure that such combinations (itemsets) are collocated so that orders are not split across multiple warehouses.
```
#
# Initialization: import OR-Tools and configure its C++ logging.
#
from ortools.linear_solver import pywraplp
from ortools.init import pywrapinit

import numpy as np

# Route OR-Tools' native (C++) log output to stderr, without the default
# per-line log prefix, tagged with this script name.
pywrapinit.CppBridge.InitLogging('linear-transportation-problem.py')
cpp_flags = pywrapinit.CppFlags()
cpp_flags.logtostderr = True
cpp_flags.log_prefix = False
pywrapinit.CppBridge.SetFlags(cpp_flags)
```
# Scenario 1: Allocation Optimization for a Single Product
We consider a retailer that needs to allocate the inventory across multiple warehouses. We assume only one SKU.
We assume $n$ warehouses that serve $m$ markets (demand points). The cost of shipping one unit from $i$-th warehouse to $j$-th market is $c^s_{ij}$. The unit selling price in market $j$ is $p_j$.
We aim to determine the number of units $q_i$ that need to be allocated in each warehouse by solving the following LP problem:
$$
\max_{q} \quad \sum_j p_j \sum_i q_{ij} - \sum_i\sum_j c^s_{ij} q_{ij}
$$
subject to
$$
\sum_{i} q_{ij} \le D_j, \quad j=1,\ldots,m
$$
where $q_{ij}$ is the number of units allocated for market $j$ in warehouse $i$ and $D_j$ is the demand in market $j$. The allocation level at warehouse $i$ is then
$$
q_i = \sum_j q_{ij}
$$
```
# Build and solve the single-product allocation LP with the GLOP solver.
solver = pywraplp.Solver.CreateSolver('GLOP')

# Number of warehouses (index i)
n = 3
# Number of markets (index j)
m = 5
# Market demands D_j
d = [10, 20, 30, 30, 20]
# Shipping costs (cost of shipping one unit from warehouse i to market j)
cs = np.array([
    [1.0, 1.0, 1.0, 1.0, 0.2],
    [1.0, 1.0, 0.5, 0.3, 0.5],
    [0.5, 0.3, 3.0, 0.5, 0.5]
])
# Market prices p_j
p = [1, 2, 1, 3, 1]

#
# Define variables
#
# q[i, j] = units allocated at warehouse i for market j (continuous, >= 0).
q = np.array([[solver.NumVar(0.0, solver.infinity(), f'q{i}{j}') for j in range(m)] for i in range(n)])
print('Number of variables =', solver.NumVariables())

#
# Define constraints
#
# Total units shipped into market j across all warehouses must not exceed D_j.
for j in range(m):
    solver.Add(np.sum(q[:, j]) <= d[j])
print('Number of constraints =', solver.NumConstraints())

#
# Define the objective: maximize revenue minus shipping costs.
#
revenue = np.sum( [ p[j] * np.sum(q[:, j]) for j in range(m) ])
shipping_costs = np.sum( cs * q )
solver.Maximize(revenue - shipping_costs)

#
# Solve
#
status = solver.Solve()

#
# Print the results
#
# Vectorized accessor: maps an array of solver variables to their values.
solution_value = np.vectorize(lambda x: x.solution_value())
if status == pywraplp.Solver.OPTIMAL:
    print('Solution:')
    print('Objective value =', solver.Objective().Value())
    for i in range(n):
        # Stock level at warehouse i = sum of its per-market allocations.
        q_i = np.sum(solution_value(q[i, :]))
        print(f'Stock level at warehouse {i}: {q_i}')
else:
    print('The problem does not have an optimal solution.')
```
# Scenario 2: Frequent Itemsets Should Be Collocated
In this section, we consider an extended formulation of the problem with multiple SKUs. We can specify SKU combinations (itemsets) that are frequently purchased in one order, and the solver searches for an allocation that ensures that each order can be sourced from one warehouse.
```
# Number of warehouses
W = 3
# Number of markets
M = 5

# Demand by itemset and market: each entry is (itemset, demand per market).
d = [
    ({'a'}, [10, 20, 30, 30, 20]), # orders with product a only
    ({'b'}, [20, 30, 10, 40, 10]), # orders with product b only
    ({'c'}, [40, 30, 10, 10, 20]), # orders with product c only
    ({'a', 'b'}, [5, 7, 3, 3, 8]), # orders with a and b
    ({'b', 'c'}, [3, 9, 2, 1, 4]), # orders with b and c
    ({'a', 'c'}, [7, 2, 5, 9, 7]), # orders with a and c
    ({'a', 'b', 'c'}, [1, 0, 2, 1, 0]) # orders with a, b, and c
]

# Number of itemsets
S = len(d)

# Shipping costs (cost of shipping one unit from warehouse w to market m)
cs = np.array([
    [1.0, 1.0, 1.0, 1.0, 0.2],
    [1.0, 1.0, 0.5, 0.3, 0.5],
    [0.5, 0.3, 3.0, 0.5, 0.5]
])

# Product unit price by market
pr = {
    'a': [1, 2, 1, 3, 1], # product a
    'b': [2, 1, 1, 1, 2], # product b
    'c': [1, 2, 1, 1, 1] # product c
}

#
# Compute the derived parameters
#
# Demand matrix of shape (S, M): dv[s, m] = demand for itemset s in market m.
dv = np.array([v for (k,v) in d])

# Price of a whole itemset in each market = sum of its products' prices.
pr_itemset = np.zeros((S, M))
for s in range(S):
    for m in range(M):
        items = d[s][0]
        for item in items:
            pr_itemset[s, m] += pr[item][m]

#
# Specify and solve the optimization problem
#
solver = pywraplp.Solver.CreateSolver('GLOP')

#
# Define variables
#
# q[s, w, m] = units of itemset s allocated at warehouse w for market m.
# Because each variable ties an itemset to a single warehouse, itemsets
# are collocated by construction.
q = np.array([[[solver.NumVar(0.0, solver.infinity(), f'q{s}{w}{m}')
                for m in range(M)]
               for w in range(W)]
              for s in range(S)])
print('Number of variables =', solver.NumVariables())

#
# Define constraints
#
# Supply of each itemset to each market cannot exceed its demand.
for m in range(M):
    for s in range(S):
        solver.Add(np.sum(q[s, :, m]) <= dv[s, m])
print('Number of constraints =', solver.NumConstraints())

#
# Define the objective: maximize itemset revenue minus shipping costs.
#
revenue = np.sum( [[ pr_itemset[s, m] * np.sum(q[s, :, m]) for m in range(M) ] for s in range(S)])
shipping_costs = np.sum( cs * np.sum(q, axis = 0) )
solver.Maximize(revenue - shipping_costs)

#
# Solve
#
status = solver.Solve()

#
# Print the results
#
# Vectorized accessor for solver variable solution values.
solution_value = np.vectorize(lambda x: x.solution_value())
if status == pywraplp.Solver.OPTIMAL:
    print('Solution:')
    print('Objective value =', solver.Objective().Value())
    # Total units of each itemset allocated at each warehouse.
    q_sw = np.zeros((S, W))
    for s in range(S):
        for w in range(W):
            q_sw[s, w] = np.sum(solution_value(q[s, w, :]))
    # Roll itemset allocations up into per-product stock levels.
    q_pw = {p : np.zeros(W) for p in pr.keys()}
    for w in range(W):
        for s in range(S):
            for i in d[s][0]:
                q_pw[i][w] += q_sw[s, w]
    for w in range(W):
        print(f'\nWarehouse {w}:')
        for k,v in q_pw.items():
            print(f'\tProduct {k} : {v[w]} units')
        print(f'\tBreakdown by itemsets:')
        for s in range(S):
            print(f'\t\t{d[s][0]} : {q_sw[s, w]}')
else:
    print('The problem does not have an optimal solution.')
```
| github_jupyter |
# Part I. ETL Pipeline for Pre-Processing the Files
## PLEASE RUN THE FOLLOWING CODE FOR PRE-PROCESSING THE FILES
#### Import Python packages
```
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
```
#### Creating list of filepaths to process original event csv data files
```
# checking your current working directory
print(os.getcwd())

# Get your current folder and subfolder event data
filepath = os.getcwd() + '/event_data'

# Collect every file path under `filepath`, accumulating across ALL
# subdirectories. Fix: the original re-assigned `file_path_list` on each
# os.walk() iteration, so only files from the last visited directory
# survived.
file_path_list = []
for root, dirs, files in os.walk(filepath):
    # join the file path and roots with the subdirectories using glob
    file_path_list.extend(glob.glob(os.path.join(root, '*')))
# keep regular files only — glob's '*' also matches sub-directories,
# which would crash the open() calls downstream
file_path_list = [p for p in file_path_list if os.path.isfile(p)]
#print(file_path_list)
```
#### Processing the files to create the data file csv that will be used for Apache Cassandra tables
```
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []

# for every filepath in the file path list
for f in file_path_list:
    # reading csv file
    with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        next(csvreader)  # skip the header row of each raw file
        # extracting each data row one by one and append it
        for line in csvreader:
            #print(line)
            full_data_rows_list.append(line)

# uncomment the code below if you would like to get total number of rows
#print(len(full_data_rows_list))
# uncomment the code below if you would like to check to see what the list of event data rows will look like
#print(full_data_rows_list)

# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
# QUOTE_ALL keeps embedded commas in artist/song names from breaking rows.
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)

with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
                'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # rows with an empty artist field carry no song-play data — skip them
        if (row[0] == ''):
            continue
        # project the raw row down to the 11 columns needed downstream
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))

# check the number of rows in your csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
    print(sum(1 for line in f))
```
# Part II. Complete the Apache Cassandra coding portion of your project.
## Now you are ready to work with the CSV file titled <font color=red>event_datafile_new.csv</font>, located within the Workspace directory. The event_datafile_new.csv contains the following columns:
- artist
- firstName of user
- gender of user
- item number in session
- last name of user
- length of the song
- level (paid or free song)
- location of the user
- sessionId
- song title
- userId
The image below is a screenshot of what the denormalized data should appear like in the <font color=red>**event_datafile_new.csv**</font> after the code above is run:<br>
<img src="images/image_event_datafile_new.jpeg">
## Begin writing your Apache Cassandra code in the cells below
#### Creating a Cluster
```
# This should make a connection to a Cassandra instance your local machine
# (127.0.0.1)
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
# To establish connection and begin executing queries, need a session
session = cluster.connect()
except Exception as e:
print(e)
```
#### Create Keyspace
```
# TO-DO: Create a Keyspace
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS apache_cassandra
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
```
#### Set Keyspace
```
# TO-DO: Set KEYSPACE to the keyspace specified above
try:
session.set_keyspace('apache_cassandra')
except Exception as e:
print(e)
```
### Now we need to create tables to run the following queries. Remember, with Apache Cassandra you model the database tables on the queries you want to run.
## Create queries to ask the following three questions of the data
### 1. Give me the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 4
### 2. Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 182
### 3. Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
```
## Query 1: artist, song title and song length for sessionId = 338 and
## itemInSession = 4. The table is modelled for this exact query:
## partition key session_id, clustering column item_in_session.
delete_sessions_table_query = "DROP TABLE IF EXISTS sessions"
session.execute(delete_sessions_table_query)

create_sessions_table_query = "CREATE TABLE IF NOT EXISTS sessions (artist text, item_in_session int,length float, session_id int, song_title text, PRIMARY KEY (session_id, item_in_session))"
session.execute(create_sessions_table_query)

# Load every event row from the pre-processed CSV into the table.
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        # CSV column layout: 0 artist, 3 itemInSession, 5 length,
        # 8 sessionId, 9 song title
        query = "INSERT INTO sessions (artist, item_in_session, length, session_id, song_title)"
        query = query + " VALUES (%s, %s, %s, %s, %s)"
        session.execute(query, (line[0], int(line[3]), float(line[5]), int(line[8]), line[9]))
```
#### Do a SELECT to verify that the data have been inserted into each table
```
## TO-DO: Add in the SELECT statement to verify the data was entered into the table
select_session_quert = "select artist, song_title, length from sessions WHERE session_id = 338 and item_in_session = 4"
rows = session.execute(select_session_quert)
for row in rows:
print (row.artist, row.song_title, row.length)
```
### COPY AND REPEAT THE ABOVE THREE CELLS FOR EACH OF THE THREE QUESTIONS
```
## Query 2: artist, song (sorted by itemInSession) and user name for
## userid = 10, sessionid = 182. Composite partition key
## (user_id, session_id) with clustering column item_in_session yields
## the required per-session ordering.
delete_users_table_query = "DROP TABLE IF EXISTS users"
session.execute(delete_users_table_query)

create_users_table_query = "CREATE TABLE IF NOT EXISTS users (artist text, first_name text, item_in_session int, last_name text, session_id int, song_title text, user_id int, PRIMARY KEY ((user_id, session_id), item_in_session))"
session.execute(create_users_table_query)

# NOTE(review): the original comment here said "Query 3", but this loop
# populates the Query 2 table; Query 3 is handled further below.
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader) # skip header
    for line in csvreader:
        query = "INSERT INTO users (artist, first_name, item_in_session, last_name, session_id, song_title, user_id)"
        query = query + " VALUES (%s, %s, %s, %s, %s, %s, %s)"
        session.execute(query, (line[0], line[1], int(line[3]), line[4], int(line[8]), line[9], int(line[10])))

# Verify: fetch the songs listened to by user 10 in session 182.
select_user_query = "select artist, song_title, first_name, last_name from users WHERE session_id = 182 and user_id = 10"
rows = session.execute(select_user_query)
for row in rows:
    print (row.artist, row.song_title, row.first_name, row.last_name)
```
### Delete and create the song_listens table to match query 3.
```
delete_song_listens_table_query = "DROP TABLE IF EXISTS song_listens"
session.execute(delete_song_listens_table_query)
create_song_listens_table_query = "CREATE TABLE IF NOT EXISTS song_listens (first_name text, last_name text, song_title text, user_id int,PRIMARY KEY (song_title, user_id))"
session.execute(create_song_listens_table_query)
```
### Insert all song_listens into the table
```
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
csvreader = csv.reader(f)
next(csvreader) # skip header
for line in csvreader:
query = "INSERT INTO song_listens (first_name, last_name, song_title, user_id)"
query = query + " VALUES (%s, %s, %s, %s)"
session.execute(query, (line[1], line[4], line[9], int(line[10])))
```
### Do a SELECT to verify that the data have been inserted into the table
### Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
```
select_song_listens_query = "select first_name, last_name from song_listens WHERE song_title = 'All Hands Against His Own'"
rows = session.execute(select_song_listens_query)
for row in rows:
print (row.first_name, row.last_name)
```
### Drop the tables before closing out the sessions
```
## TO-DO: Drop the table before closing out the sessions
delete_sessions_table_query = "DROP TABLE IF EXISTS sessions"
session.execute(delete_sessions_table_query)
delete_users_table_query = "DROP TABLE IF EXISTS users"
session.execute(delete_users_table_query)
delete_song_listens_table_query = "DROP TABLE IF EXISTS song_listens"
session.execute(delete_song_listens_table_query)
```
### Close the session and cluster connection¶
```
session.shutdown()
cluster.shutdown()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/GowthamKumar1626/Machine-Learning-Youtube/blob/master/Computer%20Vision/Rock_Paper_Scissor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**Hello Guys Welcome to the new session**<br>
Today we will deal with rock paper scissor dataset.<br>
Have you ever faced any problem with <b>Overfitting</b>?<br>
Do you know how to solve the problem of overfitting in Image Classification task?<br>
Join with me I will show you how to deal with it...
## **Imports**
```
%tensorflow_version 2.x. #For colab users
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(42)
tf.random.set_seed(42) #To make this notebook's output stable across runs
```
## **Dataset Builder**
```
builder = tfds.builder("rock_paper_scissors")
info = builder.info
print(info)
```
**About info**<br>
Each image size (300, 300, 3)<br>
No.of labels: 3<br>
No.of splits: 2 (train, test)<br>
Total no.of examples: 2892
## **Download dataset using builder**
```
builder.download_and_prepare()
(train, val, test) = tfds.load("rock_paper_scissors", split=["train", "test[:90%]", "test[90%:]"], shuffle_files=True, as_supervised=True)
```
Note: as_supervised=True will return Tuple with image and labels
## **Collect class names**
```
class_names = []
for i in range(info.features['label'].num_classes):
class_names.append(info.features['label'].int2str(i))
class_names
```
## **Plot one random image**
```
image, label = next(iter(train))
_ = plt.imshow(image)
_ = plt.title(class_names[label])
#Let us define some variables
BATCH_SIZE = 16
BUFFER_SIZE = 1000
NUM_EPOCHS = 5
IMAGE_SIZE = 180
NUM_CLASSES = len(class_names)
```
## **A sequential model for rescale and resize**
```
resize_and_rescale = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
])
resize_image = resize_and_rescale(np.expand_dims(image, axis=0))
_ = plt.imshow(resize_image[0])
plt.show()
```
## **Prepare train and val sets**
```
AUTOTUNE = tf.data.experimental.AUTOTUNE

def prepare(dataset, shuffle=False, training=False):
    """Resize/rescale a tf.data dataset, optionally shuffle, then batch and prefetch."""
    # NOTE(review): Resizing and Rescaling are deterministic layers, so
    # the training flag below has no observable effect — both branches
    # behave identically. It only matters for random augmentation layers.
    if training:
        dataset = dataset.map(lambda x,y: (resize_and_rescale(x, training=True), y),
                              num_parallel_calls=AUTOTUNE)
    else:
        dataset = dataset.map(lambda x,y: (resize_and_rescale(x, training=False), y),
                              num_parallel_calls=AUTOTUNE)
    if shuffle:
        # shuffle before batching so examples are mixed across batches
        dataset = dataset.shuffle(BUFFER_SIZE)
    dataset = dataset.batch(BATCH_SIZE)
    # overlap preprocessing of the next batch with work on the current one
    return dataset.prefetch(buffer_size=AUTOTUNE)

train_ds = prepare(train, shuffle=True, training=True)
val_ds = prepare(val)
```
## **Create our MODEL**
```
# Baseline CNN classifier for the rock/paper/scissors images.
model = tf.keras.models.Sequential([
    layers.Conv2D(32, kernel_size=3, padding="same", activation="relu"),
    layers.Conv2D(64, kernel_size=3, padding="same", activation="relu"),
    layers.MaxPool2D(),
    layers.Flatten(),
    layers.Dense(128, activation="relu"),
    # Fix: the output layer had 10 units although the dataset only has
    # NUM_CLASSES (= 3) classes; the extra logits were dead outputs.
    layers.Dense(NUM_CLASSES, activation="softmax")
])

model.compile(
    # from_logits=False because the last layer already applies softmax
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    optimizer = "adam",
    metrics=["accuracy"]
)

history = model.fit(
    train_ds,
    epochs = NUM_EPOCHS,
    validation_data = val_ds
)
```
**Overfit**<br>
Our training set achieved 100% accuracy, but validation set 62%
Our model is overfitted
Let us see learning curves
```
pd.DataFrame(history.history).plot()
```
Learning curves are too bad
## **Plot some predictions with overfitted model**
```
plt.figure(figsize=(15, 15))
for i, datapoint in enumerate(test.take(25)):
ax = plt.subplot(5, 5, i+1)
plt.imshow(datapoint[0])
image = resize_and_rescale(datapoint[0])
image = np.expand_dims(image, axis = 0)
if datapoint[1] == np.argmax(model.predict(image)):
plt.title(class_names[np.argmax(model.predict(image))], color="green")
else:
plt.title(class_names[np.argmax(model.predict(image))], color="red")
plt.axis("off")
plt.show()
```
OMG! More than 10 images are wrong predictions out of 25 images
We will solve this problem in my next session, please watch next video
**Hello Guys**
Welcome back to the session. Previously we created a model that is `overfitted`.<br>
What do we need to do now in order to avoid `overfitting`?
## **Data Augmentation**
It is a technique to increase the diversity or `randomness` of your training set by applying random transformations. <br>
This is the first step we need to take if our model is overfitting (in the case of image classification)
Before that we will create a function for plotting our predictions
```
def plot_predictions(data, model, n_rows=5, n_cols=5):
    """Plot the model's predictions on the first n_rows * n_cols samples.

    Titles are green for correct predictions and red for wrong ones.
    """
    plt.figure(figsize=(15, 15))
    for i, (image, label) in enumerate(data.take(n_rows * n_cols)):
        plt.subplot(n_rows, n_cols, i + 1)
        plt.imshow(image)
        batch = np.expand_dims(resize_and_rescale(image), axis=0)
        # Fix: run inference once per image — the original called
        # model.predict() twice (once for the check, once for the title).
        predicted = int(np.argmax(model.predict(batch)))
        color = "green" if label == predicted else "red"
        plt.title(class_names[predicted], color=color)
        plt.axis("off")
    plt.show()
```
Now lets write our augmentation code
```
augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomZoom(0.3),
tf.keras.layers.experimental.preprocessing.RandomFlip(mode='horizontal_and_vertical'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.3)
])
```
## **Plot some augmented images**
```
image, lable = next(iter(train))
augmented_images = augmentation(np.expand_dims(image, axis=0))
_ = plt.imshow(augmented_images[0])
plt.show()
plt.figure(figsize=(10, 10))
for i in range(9):
augmented_images = augmentation(np.expand_dims(image, axis=0))
ax = plt.subplot(3, 3, i+1)
plt.imshow(augmented_images[0])
plt.axis("off")
plt.show()
```
Yeah it's cool
Now let make some changes in prepare function (which we defined in last session) <br>
Now we will add `augmentation` part in prepare function.
```
AUTOTUNE = tf.data.experimental.AUTOTUNE
def prepare(dataset, shuffle=False, augment=False):
dataset = dataset.map(lambda x,y: (resize_and_rescale(x), y),
num_parallel_calls=AUTOTUNE)
if shuffle:
dataset = dataset.shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE)
if augment:
dataset = dataset.map(lambda x,y: (augmentation(x, training=True), y),
num_parallel_calls=AUTOTUNE)
return dataset.prefetch(buffer_size=AUTOTUNE)
train_ds = prepare(train, shuffle=True, augment=True)
val_ds = prepare(val)
```
Ok now we will grab our previus defined model.<br>
* With out augmentation it is overfitted.<br>
* Now let us check Whether the same situation will repeat or not?
```
model = tf.keras.models.Sequential([
layers.Conv2D(32, kernel_size=3, padding="same", activation="relu"),
layers.Conv2D(64, kernel_size=3, padding="same", activation="relu"),
layers.MaxPool2D(),
layers.Flatten(),
layers.Dense(128, activation="relu"),
layers.Dense(10, activation="softmax")
])
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
optimizer = "adam",
metrics=["accuracy"]
)
history = model.fit(
train_ds,
epochs = NUM_EPOCHS,
validation_data = val_ds
)
```
Ok, it seems our model is not overfitted now. Great! But accuracy is too low.
Let me first plot the learning curves
```
pd.DataFrame(history.history).plot()
```
Yeah everything is fine. But low accuracy.<br>
* Before we run our model on 5 epochs.Now we will run our model on more epochs let num_epochs=30 and i will use `EarlyStopping`callback to stop when our model is overfitting.
```
model = tf.keras.models.Sequential([
layers.Conv2D(32, kernel_size=3, padding="same", activation="relu"),
layers.Conv2D(64, kernel_size=3, padding="same", activation="relu"),
layers.MaxPool2D(),
layers.Flatten(),
layers.Dense(128, activation="relu"),
layers.Dense(10, activation="softmax")
])
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
optimizer = "adam",
metrics=["accuracy"]
)
history = model.fit(
train_ds,
epochs = 30,
validation_data = val_ds,
callbacks = [tf.keras.callbacks.EarlyStopping(patience=2)]
)
```
It seems we didn't achieve much. Let us plot learning curves.
```
pd.DataFrame(history.history).plot()
test_ds = prepare(test)
model.evaluate(test_ds)
```
`62.16%` accuracy on test set. Can we increase more? Yes we can..<br>
Let me create a new model with new architecture. Previous model is not giving good accuracy. Let us make some changes in model. <br> In this new model I am going to change `Optimizer` to `RMSProp`
## **New model**
```
tf.keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = tf.keras.Sequential([
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dropout(0.2),
layers.Dense(128, activation='relu'),
layers.Dense(3, activation='softmax')
])
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
metrics=['accuracy']
)
history = model.fit(
train_ds,
epochs=15,
validation_data = val_ds
)
```
I think it is good. Plot learning curves
```
pd.DataFrame(history.history).plot()
```
It is pretty Good. Let us plot some predicitons
```
plot_predictions(test, model, n_rows=6, n_cols=6)
```
Out of `36` images, only one image is wrong 😃
```
test_ds = prepare(test)
model.evaluate(test_ds)
```
`94.59` Awesome.
Thank you guys. We can do more. Try to achieve greater than this <br>
Follow my channel Thank You Guys
```
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
```
# SciPy - Very Quick Guide
- SciPy (pronounced as Sigh Pi) is a scientific python library to perform mathematical, scientific and engineering computations.
- The SciPy library depends on NumPy, which provides convenient and fast N-dimensional array manipulation.
## NumPy Vector/Array
```
import matplotlib.pyplot as plt
import numpy as np
list = [1,2,3,4]
a = np.array(list)
a
np.zeros((2, 3))
np.ones((2, 3))
np.arange(7)
np.linspace(1., 4., 6)
```
## Matrix
```
mat = np.matrix('1 2; 3 4')
mat.H
```
**Transpose**
```
mat.T
```
## K-Means
```
# K-Means demo data: two well-separated Gaussian clusters of `pts` points
# each. (Fix: the unused `vstack`, `array` and `rand` imports were removed.)
from scipy.cluster.vq import kmeans, vq, whiten

pts = 20
# cluster centred at the origin
a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
# second cluster centred at (30, 10)
b = np.random.multivariate_normal([30, 10],
                                  [[10, 2], [2, 1]],
                                  size=pts)
# stack both clusters into a single (2 * pts, 2) observation matrix
data = np.concatenate((a, b))
data
```
Before running k-means, it is beneficial to **rescale** each feature dimension of the observation set with whitening. Each feature is **divided by its standard deviation** across all observations to give it unit variance.
```
whitened = whiten(data)
```
computing **K-Means** with K=2 (2 clusters)
```
codebook, distortion = kmeans(whitened, 2)
codebook
plt.scatter(whitened[:, 0], whitened[:, 1])
plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
```
# Interpolate
```
from scipy import interpolate
x = np.linspace(0, 4, 12)
x
y = np.cos(x**2/3+4)
y
plt.plot(x, y, 'o')
f1 = interpolate.interp1d(x, y, kind = 'linear')
f2 = interpolate.interp1d(x, y, kind = 'cubic')
xnew = np.linspace(0, 4,30)
xnew
plt.plot(x, y, 'o', xnew, f1(xnew), '-', xnew, f2(xnew), '--')
plt.legend(['data', 'linear', 'cubic'], loc = 'best')
```
# Linalg
SciPy is built using the optimized ATLAS LAPACK and BLAS libraries. It has very fast **linear algebra** capabilities. All of these linear algebra routines expect an object that can be converted into a **two-dimensional array**. The output of these routines is also a two-dimensional array.
## Eigenvalues and Eigenvectors
An eigenvector does not change direction in a transformation
<img src="https://www.mathsisfun.com/algebra/images/eigen-transform.svg" />
```
from scipy import linalg
# Eigen-decomposition of a 2x2 matrix.
A = np.array([[1,2],[3,4]])
A
# l: eigenvalues (returned as complex numbers), v: eigenvectors as columns.
l, v = linalg.eig(A)
print('eigen values=', l, '\n')
print('eigen vector=', v)
```
## Singular Value Decomposition (SVD)
- Singular Value Decomposition (SVD) is a common dimensionality reduction technique in data science
- The scipy.linalg.svd factorizes the matrix `a` into two unitary matrices `U` and `Vh` and a 1-D array `s` of singular values (real, non-negative) such that `a == U @ S @ Vh`, where `S` is a suitably shaped matrix of zeros whose main diagonal holds the singular values `s`.
```
# Random 3x2 complex matrix to decompose.
a = np.random.randn(3, 2) + 1.j*np.random.randn(3, 2)
a
# U: 3x3 unitary, Vh: 2x2 unitary, s: the two singular values.
U, s, Vh = linalg.svd(a)
print(U, '\n\n', Vh, '\n\n', s)
```
| github_jupyter |
# Algebraic differentiators: A detailed introduction
This notebook includes a detailed introduction into the theoretical background of algebraic differentiators and shows how to use the proposed implementation.
## Content of this notebook
**Theoretical background**: Time-domain and frequency-domain analysis
**Numerical differentiation of sample signals**: The numerical estimation of derivatives of simulation and experimental data.
## Theoretical background
### Time-domain analysis
The algebraic derivative estimation methods were initially derived using differential algebraic manipulations of truncated Taylor series \cite{mboup2007b,mboup2009a}. Later works \cite{liu2011a,kiltz2017} derived these filters using an approximation theoretical approach that yields a straightforward analysis of the filter characteristics, especially the estimation delay. Using this approach, the estimate of the $n$-th order derivative of a signal ${t\mapsto y(t)}$ denoted $\hat{y}^{(n)}$ can be approximated as
\begin{equation}
\hat{{y}}^{(n)}(t)=\int_{0}^{T}g^{(n)}_T(\tau)y(t-\tau)\textrm{d}\tau,
\end{equation}
with the filter kernel
\begin{equation}
g(t)=\frac{2}{T}\sum_{i=0}^{N}\frac{P_i^{(\alpha,\beta)}(\vartheta)}{\big\lVert{P_i^{(\alpha,\beta)}\big\rVert}^2}w^{(\alpha,\beta)}\left(\nu(t)\right)P_i^{(\alpha,\beta)}\left(\nu(t)\right).
\end{equation}
In the latter equation $\nu(t)=1-\frac{2}{T}t$, $\big\lVert{x}\big\rVert=\sqrt{\langle x,x\rangle}$ is the norm induced by the inner product
\begin{equation}
\langle x,y\rangle=\int_{-1}^{1}w^{(\alpha,\beta)}(\tau)x(\tau){y(\tau)}\textrm{d}\tau,
\end{equation}
with the weight function
\begin{equation}
w^{(\alpha,\beta)}(\tau)=\begin{cases}
(1-\tau)^\alpha(1+\tau)^\beta,&\quad\tau\in[-1,1],\\
0,&\quad\text{otherwise},
\end{cases}
\end{equation}
$N$ is the degree of the polynomial approximating the signal $y^{(n)}$ in the time window $[t-T,t]$, $T$ is the filter window length, and $\vartheta$ parameterizes the estimation delay as described below.
This approach yields a straightforward analysis of the estimation delay $\delta_t$ and the degree of exactness $\gamma$. The degree of exactness was introduced in \cite{kiltz2017} as the polynomial degree up to which the derivative estimation is exact. If $\gamma=2$ for example, the first and second time derivatives of a polynomial signal of degree two are exact up to an estimation delay. The delay and the degree of exactness are given as
\begin{align}
\gamma&=\left\{\begin{matrix}
n+N+1,\quad &\text{if } N=0\vee\vartheta=p_{N+1,k}^{(\alpha,\beta)}\\[4pt]
n+N,\quad&\text{otherwise,}
\end{matrix}\right.\\
\delta_t&=\left\{\begin{matrix}
\frac{\alpha+1}{\alpha+\beta+2}T,\quad&\text{if } N=0\\[4pt]
\frac{1-\vartheta}{2}T,\quad&\text{otherwise,}
\end{matrix}\right.
\end{align}
with $p_{N+1,k}^{(\alpha,\beta)}$ the $k$-th zero of the Jacobi polynomial of degree $N+1$.
#### Initializing an algebraic differentiator and performing a time-domain analysis
The parameter $\vartheta$ is initialized by default as the largest zero of the Jacobi polynomial of degree $N+1$. This can be changed using the appropriate class member function (see documentation).
```
%matplotlib notebook
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from algebraicDifferentiator import AlgebraicDifferentiator
import numpy as np
ts = 0.001
# Initialize two different differentiators: For the first one the window length is specified
# while the cutoff frequency is specified for the second
diffA = AlgebraicDifferentiator(N=0,alpha=4.,beta=4,T=0.1, ts=ts)
diffB = AlgebraicDifferentiator(N=1,alpha=4.,beta=4.,T=None,wc = 100, ts=ts)
# Plot the impulse and step response of the filter
t = np.arange(0,0.2,ts/10)
n = 1
gA = diffA.evalKernelDer(t,n)
gB = diffB.evalKernelDer(t,n)
fig, ax = plt.subplots(nrows=1, ncols=2,sharex=True, figsize=(10, 5))
fig.suptitle("Impulse and step response of two algebraic differentiators")
ax[0].plot(t/diffA.get_T(),diffA.evalKernel(t),label=r"diff. A")
ax[0].plot(t/diffB.get_T(),diffB.evalKernel(t),label=r"diff. A")
ax[1].plot(t/diffA.get_T(),diffA.get_stepResponse(t),label=r"\int_{0}^{t}g_{\mathrm{A}}(\tau)\mathrm{d}\tau")
ax[1].plot(t/diffB.get_T(),diffB.get_stepResponse(t),label=r"\int_{0}^{t}g_{\mathrm{B}}(\tau)\mathrm{d}\tau")
ax[0].set_xlabel(r"$\frac{t}{T}$")
ax[1].set_xlabel(r"$\frac{t}{T}$")
ax[0].set_ylabel(r"impulse responses")
ax[1].set_ylabel(r"step responses")
ax[0].legend()
ax[0].grid()
ax[1].grid()
plt.grid()
plt.show()
# Plot the first derivative of the kernel
t = np.arange(-0.01,0.2,ts/10)
n = 1
gA = diffA.evalKernelDer(t,n)
gB = diffB.evalKernelDer(t,n)
fig, ax_g = plt.subplots(nrows=1, ncols=1,
sharex=True, figsize=(10, 5))
fig.suptitle("First derivatives of the kernels of two algebraic differentiators")
ax_g.plot(t/diffA.get_T(),gA*diffA.get_T()**(1+n)/2**n,label="diff. A$")
ax_g.plot(t/diffB.get_T(),gB*diffA.get_T()**(1+n)/2**n,label="$diff. B")
ax_g.set_xlabel(r"$\frac{t}{T}$")
ax_g.set_ylabel(r"first derivative of the kernels")
ax_g.legend()
plt.grid()
plt.show()
```
## Frequency-domain analysis
The Fourier transform of an algebraic differentiator is given as
\begin{equation}
\mathcal{G}(\omega)=\sum_{i=0}^{N}\frac{\left(\alpha+\beta+2i+1\right)\mathrm{P}_i^{(\alpha,\beta)}}{\alpha+\beta+i+1}\sum_{k=0}^{i}(-1)^{i-k}\binom{i}{k}\mathrm{M}_{i,k}^{(\alpha,\beta)}(-\iota\omega T)
\label{eq:FourierTranformation}
\end{equation}
where $\mathrm{M}_{i,k}^{(\alpha,\beta)}$ denotes the hypergeometric Kummer M-Function.
An approximation of the amplitude spectrum of the algebraic differentiator is
\begin{equation}
\left|\mathcal{G}(\omega)\right|\approx\begin{cases}
1,&\quad \left|{\omega}\right|\leq\omega_{\mathrm{c}},\\
\left|{\frac{\omega_{\mathrm{c}}}{\omega}}\right|^{\mu},&\quad \text{otherwise},
\end{cases}
\label{eq:lowPassFilterApp}
\end{equation}
with $\omega_{\mathrm{c}}$ the cutoff frequency of the algebraic differentiator and $\mu=1+\min\{\alpha,\beta\}$.
Lower and upper bounds can be computed for the amplitude spectrum of the algebraic differentiators. If $N=0$ and $\alpha=\beta$, the amplitude reaches $0$ and the lower bound is $0$.
```
# Frequency grids (rad/s): `omega` for the exact spectra, `omegaH` for the bounds.
omega = np.linspace(1,800,4*10**3)
omegaH = np.linspace(1,800,4*10**3)
# Get phase and amplitude of Fourier transform of filter A
ampA,phaseA = diffA.get_ampAndPhaseFilter(omega)
# Get upper and lower bound for the amplitude of Fourier transform of filter A
uA, lA, mA = diffA.get_asymptotesAmpFilter(omegaH)
# Get phase and amplitude of Fourier transform of filter B
ampB,phaseB = diffB.get_ampAndPhaseFilter(omega)
# Get upper and lower bound for the amplitude of Fourier transform of filter B
uB, lB, mB = diffB.get_asymptotesAmpFilter(omegaH)
# Plot results
## PLEASE NOTE: Python will give a warning in the conversion to dB for differentiator A
## since the amplitude spectrum reaches zero!
fig, ax = plt.subplots(nrows=1, ncols=2,sharex=False, figsize=(10, 5))
fig.suptitle("Frequency analysis of two algebraic differentiators")
ax[0].plot(omega/diffA.get_cutoffFreq(),20*np.log10(ampA),label=r"$|\mathcal{G}_{\mathrm{A}}(\omega)|$")
ax[0].plot(omegaH/diffA.get_cutoffFreq(),20*np.log10(uA),label=r"$|\mathcal{G}_{\mathrm{A}}^+(\omega)|$")
ax[0].plot(omegaH/diffA.get_cutoffFreq(),20*np.log10(lA),label=r"$|\mathcal{G}_{\mathrm{A}}^-(\omega)|$")
ax[0].plot(omegaH/diffA.get_cutoffFreq(),20*np.log10(mA),label=r"$|\mathcal{G}_{\mathrm{A}}^{\Delta}(\omega)|$")
ax[0].set_ylim(top=1)
ax[0].set_xlabel(r"$\frac{\omega}{\omega_{\mathrm{c,A}}}$")
ax[0].set_ylabel(r"amplitudes in dB")
ax[0].legend()
ax[0].grid()
ax[1].plot(omega/diffB.get_cutoffFreq(),20*np.log10(ampB),label=r"$|\mathcal{G}_{\mathrm{B}}(\omega)|$")
ax[1].plot(omegaH/diffB.get_cutoffFreq(),20*np.log10(uB),label=r"$|\mathcal{G}_{\mathrm{B}}^+(\omega)|$")
# NOTE(review): the lower bound lB is computed above but never plotted for
# filter B — confirm whether this omission is intentional.
ax[1].plot(omegaH/diffB.get_cutoffFreq(),20*np.log10(mB),label=r"$|\mathcal{G}_{\mathrm{B}}^{\Delta}(\omega)|$")
ax[1].set_xlabel(r"$\frac{\omega}{\omega_{\mathrm{c,B}}}$")
ax[1].set_ylim(top=1)
ax[1].legend()
plt.grid()
fig.show()
```
## Numerical differentiation of sample signals
### Simulated signal
The derivative of a signal $y:t\rightarrow y(t)$ without any disturbance is estimated: The first differentiator has a delay. The second differentiator is parametrized for a delay free approximation.
```
import sympy as sp
#######################################
# Compute signals and its derivatives
#######################################
# Symbolic parameters a_0 (amplitude), a_1 (damping), a_2 (frequency) and time t.
a = sp.symbols('a_0:3')
t = sp.symbols('t')
# Test signal: y(t) = a_0 * exp(-a_1*t) * sin(a_2*t)
y = a[0]*sp.exp(-a[1]*t)*sp.sin(a[2]*t)
# Derivative to be estimated
dy = sp.diff(a[0]*sp.exp(-a[1]*t)*sp.sin(a[2]*t),t,1)
d2y = sp.diff(a[0]*sp.exp(-a[1]*t)*sp.sin(a[2]*t),t,2)
# Numerical values substituted for the symbolic parameters below.
aeval = {'a_0':1,'a_1':0.1,'a_2':4}
# Evaluate signal and true derivative
# NOTE(review): `ts` comes from an earlier cell (0.001 s) — confirm it is
# still the sampling period intended here.
teval = np.arange(0,20,ts)
for ai in a:
    y = y.subs({ai:aeval[repr(ai)]})
    dy = dy.subs({ai:aeval[repr(ai)]})
    d2y = d2y.subs({ai:aeval[repr(ai)]})
# Turn the symbolic expressions into numpy-callable functions, then sample
# them on the time grid (each name is reused for the sampled array).
yeval = sp.lambdify(t, y, "numpy")
yeval = yeval(teval)
dyeval = sp.lambdify(t, dy, "numpy")
dyeval = dyeval(teval)
d2yeval = sp.lambdify(t, d2y, "numpy")
d2yeval = d2yeval(teval)
#######################################
# Parametrize diffB to be delay-free
#######################################
# Set the parameter \vartheta
diffB.set_theta(1,False)
# Print the characteristics of the differentiator
diffB.printParam()
#######################################
# Estimate derivatives
#######################################
# Filter the signal y (order 0 = smoothing only)
xAppA = diffA.estimateDer(0,yeval)
xAppB = diffB.estimateDer(0,yeval)
# Estimate its first derivative
dxAppA = diffA.estimateDer(1,yeval)
dxAppB = diffB.estimateDer(1,yeval)
# Estimate its 2nd derivative
d2xAppA = diffA.estimateDer(2,yeval)
d2xAppB = diffB.estimateDer(2,yeval)
#######################################
# Plot results: signal, 1st and 2nd derivative, true vs. estimated
#######################################
fig, (fy,fdy,f2dy) = plt.subplots(nrows=1, ncols=3,sharex=True, figsize=(10, 5))
fig.subplots_adjust( wspace=0.5)
fy.plot(teval,yeval,label='true signal')
fy.plot(teval,xAppA,label='diff A')
fy.plot(teval,xAppB,label='diff B')
fy.set_xlabel(r'$t$')
fy.set_ylabel(r'signals')
fdy.plot(teval,dyeval,label='true signal')
fdy.plot(teval,dxAppA,label='diff A')
fdy.plot(teval,dxAppB,label='diff B')
fdy.set_xlabel(r'$t$')
fdy.set_ylabel(r'first der. of signals')
f2dy.plot(teval,d2yeval,label='true signal')
f2dy.plot(teval,d2xAppA,label='diff A')
f2dy.plot(teval,d2xAppB,label='diff B')
f2dy.set_xlabel(r'$t$')
f2dy.set_ylabel(r' second der. of signal')
plt.legend()
plt.show()
```
### Estimation of derivative of a measured signal
The measurements are loaded from .dat file. The signal is filtered and the first and second derivatives are estimated using an algebraic differentiator.
```
from os.path import dirname, join as pjoin
import scipy.io as sio
# Load measurements: column 0 is time (s), column 2 the measured signal.
data = np.loadtxt('data90.dat')
tmeas = data[:,0]
ts = tmeas[2]-tmeas[1]  # sampling period inferred from the time stamps
xmeas = data[:,2]
# Estimate derivatives (cutoff frequency wc=25 rad/s; T derived internally)
diffA = AlgebraicDifferentiator(N=0,alpha=4,beta=4,T=None,wc=25, ts=ts,display=True)
xAppA = diffA.estimateDer(0,xmeas)
dxAppA = diffA.estimateDer(1,xmeas)
d2xAppA = diffA.estimateDer(2,xmeas)
# Plot results
fig, (fy,fdy, fd2y) = plt.subplots(nrows=1, ncols=3,sharex=True, figsize=(10, 5))
fig.subplots_adjust( wspace=0.5)
fy.plot(tmeas,xmeas,label='$y(t)$')
# FIX: raw strings so the TeX commands (\hat, \dot, \ddot) are not treated
# as (invalid) Python escape sequences.
fy.plot(tmeas,xAppA,label=r'$\hat y(t)$')
fy.set_xlabel(r'$t$ in seconds')
fy.set_ylabel(r'signals')
fy.legend()
fdy.plot(tmeas,dxAppA,label=r'$\dot\hat y(t)$')
fdy.set_xlabel(r'$t$ in seconds')
fdy.legend()
fd2y.plot(tmeas,d2xAppA,label=r'$\ddot\hat y(t)$')
fd2y.set_xlabel(r'$t$ in seconds')
plt.legend()
plt.show()
```
# References
[<a id="cit-mboup2007b" href="#call-mboup2007b">1</a>] M. Mboup, C. Join and M. Fliess, ``_A revised look at numerical differentiation with an application to nonlinear feedback control_'', 15th Mediterranean Conf. on Control $\&$ Automation, June 2007. [online](https://ieeexplore.ieee.org/document/4433728)
[<a id="cit-mboup2009a" href="#call-mboup2009a">2</a>] Mboup M., Join C. and Fliess M., ``_Numerical differentiation with annihilators in noisy environment_'', Numerical Algorithms, vol. 50, number 4, pp. 439--467, 2009. [online](https://link.springer.com/article/10.1007/s11075-008-9236-1)
[<a id="cit-liu2011a" href="#call-liu2011a">3</a>] Liu D.-Y., Gibaru O. and Perruquetti W., ``_Differentiation by integration with Jacobi polynomials_'', Journal of Computational and Applied Mathematics, vol. 235, number 9, pp. 3015 - 3032, 2011. [online](http://www.sciencedirect.com/science/article/pii/S0377042710006734)
[<a id="cit-kiltz2017" href="#call-kiltz2017">4</a>] L. Kiltz, ``_Algebraische Ableitungsschätzer in Theorie und Anwendung_'', 2017. [online](https://scidok.sulb.uni-saarland.de/handle/20.500.11880/26974)
| github_jupyter |
# Homework 1: Python basics and a little plotting
** Submit this notebook to bCourses to receive a credit for this assignment.**
Please complete this homework assignment in code cells in the iPython notebook. Include comments in your code when necessary. Enter your name in the cell at the top of the notebook, and rename the notebook [email_name]_HW01.ipynb, where [email_name] is the part of your UCB email address that precedes "@berkeley.edu". Please also submit a PDF of the jupyter notebook to bcourses.
## Problem 1: Satellite Altitudes
[Adapted from Newman, Exercise 2.2] A satellite is to be launched into a circular orbit around the Earth so that it orbits the planet once every $T$ seconds. The altitude $h$ above the Earth's surface that the satellite must have is $$ h = \left( \frac{G M T^2}{4 \pi^2} \right)^{1/3} - R, $$ where $G = 6.67 \times 10^{-11}~\text{m}^3~\text{kg}^{-1}~\text{s}^{-2}$ is Newton's gravitational constant, $M = 5.97 \times 10^{24}~\text{kg}$ is the mass of the Earth, and $R = 6371~\text{km}$ is its radius.
**1a.** Write a program that, for a given value of $T$ (entered as a variable T in a cell), calculates and prints out the correct altitude in meters, kilometers, and miles, with one decimal place for each result.
*Output for 1a*: When the code cell for this part of the problem is entered, it should specify (in the comments or the Markdown cell above) what units of $T$ are assumed. The program should print out the correct altitude in meters, kilometers, and miles, with one decimal place for each result.
```
import numpy as np
PI = np.pi #the numpy library has a lot of useful mathematical constants

def find_altitude(T):
    """Calculate and print the altitude above Earth's surface for a
    satellite with orbital period T (input in seconds).

    Prints the altitude in meters, kilometers and miles (one decimal
    place each) and returns the altitude in meters.
    """
    G = 6.67e-11 #Gravitational constant, units: m^3/kg/s^2
    M = 5.97e24  #mass of Earth, units: kg
    R = 6371e3   #radius of Earth, units: m
    # h = (G*M*T^2 / (4*pi^2))^(1/3) - R
    h = (G * M * T**2 / (4 * PI**2)) ** (1.0 / 3.0) - R
    h_km = h / 1000.0       # meters -> kilometers
    h_mi = h / 1609.344     # meters -> miles (1 mile = 1609.344 m)
    print(f"Altitude: {h:.1f} m = {h_km:.1f} km = {h_mi:.1f} miles")
    return h
```
*Output for 1b and 1c:* Use code cells to carry out the desired calculations, and Markdown cells to present and discuss your results.
**1b.** Use your program to calculate the altitudes of satellites that orbit the Earth once a day (so-called "geosynchronous" orbit), once every 90 minutes, and once every 45 minutes. What do you conclude from the last of these calculations?
```
# Since you defined a function above, you can just call it with different input values.
```
**1c.** Technically a geosynchronous satellite is one that orbits the Earth once per *sidereal day*, which is 23.93 hours, not 24 hours. Why is this? And how much difference will it make to the altitude of the satellite?
#### Hints
Be very careful of units! Unlike wolframalpha.com, python is oblivious to what units your various numbers represent. You need to carefully keep track of them to make sure that your units work out. Not converting units properly can lead to [very expensive disasters](https://en.wikipedia.org/wiki/Mars_Climate_Orbiter) or just wrong scientific results ...
Remember to comment your code and use descriptive variable names so others (and future you) can understand what you're doing!
## Problem 2: Perfect Hardboiled Eggs
[Adapted from Langtangen, Exercise 1.12. You may derive the formula in Physics 112 or Physics 89] As an egg cooks, the proteins first denature and then coagulate. When the temperature exceeds a critical point, reactions begin and proceed faster as the temperature increases. In the egg white the proteins start to coagulate for temperatures above 63 C, while in the yolk the proteins start to coagulate for temperatures above 70 C. For a soft boiled egg, the white needs to have been heated long enough to coagulate at a temperature above 63 C, but the yolk should not be heated above 70 C. For a hard boiled egg, the center of the yolk should be allowed to reach 70 C.
The following formula expresses the time $t$ it takes (in seconds) for the center of the yolk to reach the temperature $T_y$ (in Celsius degrees): $$ t = \frac{M^{2/3} c \rho^{1/3}}{K \pi^2 (4\pi/3)^{2/3}} \ln \left[ 0.76 \frac{T_0 - T_w}{T_y - T_w} \right] . $$ Here, $M$, $\rho$, $c$, and $K$ are properties of the egg:
* $M$ is the mass,
* $\rho$ is the density,
* $c$ is the specific heat capacity, and
* $K$ is the thermal conductivity.
Relevant values are
* $M = 64~\text{g}$ for a large egg (USA size XL: en.wikipedia.org/wiki/Chicken_egg_sizes),
* $\rho = 1.0378~\text{g cm}^{-3}$,
* $c = 3.7~\text{J g}^{-1}\,\text{K}^{-1}$, and
* $K = 5.4 \cdot 10^{-3}~\text{W cm}^{-1}\,\text{K}^{-1}$.
Furthermore,
* $T_w$ is the temperature (in C degrees) of the boiling water, and
* $T_0$ is the original temperature (in C degrees) of the egg before being put in the water.
Suppose we want our eggs hard-boiled. Implement the formula in a program, set $T_w = 100~\text{C}$ and $T_y = 70~\text{C}$, and compute $t$ for a large egg taken from the fridge ($T_0 = 4~\text{C}$) and from room temperature ($T_0 = 20~\text{C}$). Also compute the results for a small egg ($M = 42~\text{g}$).
*Output for 2:*
When you run your code cell, it should produce the following text, with your numbers instead of the `TTT`, `MMM`, and `SSS` placeholders:
To hard-boil a large egg taken directly from the fridge, cook it for TTT minutes (MMM min, SSS sec).
To hard-boil a small egg taken directly from the fridge, cook it for TTT minutes (MMM min, SSS sec).
To hard-boil a large egg starting from room temperature, cook it for TTT minutes (MMM min, SSS sec).
To hard-boil a small egg starting from room temperature, cook it for TTT minutes (MMM min, SSS sec).
The `TTT` placeholders should be values in minutes to two decimal places. The `MMM` and `SSS` placeholders should be rounded to the nearest minute/second, with no decimal places. For example,
To hard-boil a large egg taken directly from the fridge, cook it for 56.78 minutes (56 min 47 sec).
```
import numpy as np
PI = np.pi #the numpy library has a lot of useful mathematical constants

def find_time(T_initial, M_egg):
    """Return the time in seconds for the center of the yolk to reach
    T_yolk (70 C), given the initial egg temperature T_initial (in
    degrees Celsius) and the mass of the egg M_egg (in grams)."""
    T_water = 100 #temperature of boiling water, units: degrees Celsius
    T_yolk = 70   #temperature of cooked yolk, units: degrees Celsius
    rho = 1.0378  #density of egg, units: grams/cm^3
    c = 3.7       #specific heat capacity, units: Joules/grams/(degrees Kelvin)
    K = 5.4e-3    #thermal conductivity, units: Watts/cm/(degrees Kelvin)
    # Break the formula into readable pieces instead of one long line.
    numerator = M_egg**(2.0 / 3.0) * c * rho**(1.0 / 3.0)
    denominator = K * PI**2 * (4.0 * PI / 3.0)**(2.0 / 3.0)
    log_term = np.log(0.76 * (T_initial - T_water) / (T_yolk - T_water))
    t = numerator / denominator * log_term
    return t

# You can now use your defined function to find the times needed to cook the small/large eggs with different starting temperatures
```
#### Hints
Writing the entire formula in one line is difficult to type, difficult to read, difficult to debug---and you have to retype the entire calculation just to change one piece of it. Try breaking it down in smaller chunks assigned to variables, and combine those chunks to produce the final result.
Beware of integer division!
Remember to comment your code and use descriptive variable names so others (and future you) can understand what you're doing!
## Problem 3: Estimating Half-Life
[Adapted from Ayars, Problem 0-3] The data in file [Ba137.txt](https://raw.githubusercontent.com/celegante/code_chapter_0-_github/master/Ba137.txt) is actual data from a radioactive decay experiment (you should already have the file from the Workshop). The first column is the number of decays $N$, the second is the time $t$ in seconds. We'd like to know the half-life $t_{1/2}$ of $^{137}\text{Ba}$. It should follow the decay equation $$ N = N_0 e^{-\lambda t} $$ where $\lambda = \frac{\log 2}{t_{1/2}}$. Using the techniques you've learned from the lecture and workshop, load the data from the file Ba137.txt into appropriately-named variables. Experiment with different values of $N$ and $\lambda$ and plot the resulting equation on top of the data. (Python uses `exp()` to calculate the exponential function: i.e. `y = A*exp(-L*time)` ) Don't worry about automating this process yet (unless you *really* want to!) just try adjusting things by hand until the equation matches the data pretty well. What is your best estimate for $t_{1/2}$?
*Output for 3:*
When you run your code cell, it should produce a well-labeled plot with both the data and your curve of best fit. It should also print a message to the terminal which says, "My best estimate for the half life is $x$", where $x$ is your estimate with units.
```
import numpy as np
import matplotlib.pyplot as plt
# Starting by reading in the data from the text file
# Column 1 -> decay counts, column 2 -> time in seconds; unpack=True
# returns each column as its own array.
counts, times = np.loadtxt("Ba137.txt", unpack = True)
# 'counts' and 'times' are numpy arrays (not Python lists); you can try printing to get an idea of what values they contain
# The numpy library has an exponential function; use np.exp(x) to return e^x
```
#### Hints
Remember to comment your code and use descriptive variable names so others (and future you) can understand what you're doing!
| github_jupyter |
```
# -*- coding: utf-8 -*-
from jaqs.data import DataApi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
def get_fundlist(api, invest_type, invest_style):
    """Return funds matching the given investment type/style that are
    still active (status 101001000) and whose name does not contain 'A'
    (filters out A-class share classes).

    FIX: the original requested only invest_type/invest_style/status,
    but this function filters on df['name'] and callers read
    df['symbol']; both columns are now requested explicitly.
    """
    df, msg = api.query(
        view="lb.mfInfo",
        fields="symbol,name,invest_type,invest_style,status",
        filter="",
        data_format='pandas')
    #print(df, msg)
    df = df[(df['invest_type'] == invest_type)
            & (df['invest_style'] == invest_style)
            & (df['status'] == 101001000)
            & (df['name'].apply(lambda s: 'A' not in s))]
    return df
def get_fundnav(api, symbol, start_date, end_date):
    """Fetch the accumulated NAV series of `symbol` between start_date
    and end_date (yyyymmdd ints), indexed and sorted by price_date."""
    df, msg = api.query(
        view="lb.mfNav",
        fields="price_date, nav_accumulated",
        filter="start_pdate=" + str(start_date) + "&end_pdate=" + str(end_date) + "&symbol=" + symbol,
        data_format='pandas')
    if df is None:
        # Surface the API error instead of crashing on df.index below.
        print(df, msg)
        raise RuntimeError("lb.mfNav query failed for %s: %s" % (symbol, msg))
    # FIX: np.integer is an abstract scalar type and is rejected by modern
    # NumPy/pandas; use the concrete np.int64 instead.
    df.index = df['price_date'].astype(np.int64)
    df.sort_index(inplace=True)
    return df
def get_index_map(api, symbols, price_date):
    """For each symbol in the comma-separated `symbols` string, look up
    the index it tracked on `price_date`; return {symbol: index_code}."""
    result = {}
    for sym in symbols.split(","):
        df, msg = api.query(
            view="lb.mfTrackingIndex",
            fields="",
            filter="symbol=" + sym + "&trade_date=" + str(price_date),
            data_format='pandas')
        # Keep only symbols for which a tracking index was found.
        if df is not None and len(df) > 0:
            result[sym] = df.loc[0]['index_code']
        # Small pause between requests to be gentle on the API.
        time.sleep(0.01)
    return result
def get_index_daily(api, symbol, start, end):
    """Fetch daily bars for index `symbol` between start and end
    (yyyymmdd ints), indexed and sorted by trade_date."""
    df, msg = api.daily(
        symbol=symbol,
        fields="",
        start_date=start,
        end_date=end,
        data_format='pandas')
    if df is None:
        # Surface the API error instead of crashing on df.index below.
        print(df, msg)
        raise RuntimeError("daily query failed for %s: %s" % (symbol, msg))
    df.index = df['trade_date']
    df.sort_index(inplace=True)
    return df
def cal_active_return(api, symbol, bench, start, end):
    """Compute performance and risk metrics of a fund's active return
    (fund cumulative return minus benchmark cumulative return).

    Returns (performance_metrics, risk_metrics, df_returns); all three
    are None when either price series is empty.
    """
    df_nav = get_fundnav(api, symbol, start, end)
    df_idx = get_index_daily(api, bench, start, end)
    if df_idx.empty or df_nav.empty:
        return None, None, None
    strategy_value = df_nav['nav_accumulated']
    bench_value = df_idx['close']
    # Align the two series on dates and forward-fill gaps (NAV is not
    # published on every trading day).
    market_values = pd.concat([strategy_value, bench_value], axis=1).fillna(method='ffill')
    market_values.columns = ['strat', 'bench']
    df_returns = market_values.pct_change(periods=1).fillna(0.0)
    df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
    # Active cumulative return: fund minus benchmark, re-based to 1.
    df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
    df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
    start = pd.to_datetime(start, format="%Y%m%d")
    end = pd.to_datetime(end, format="%Y%m%d")
    years = (end - start).days / 365.0
    active_cum = df_returns['active_cum'].values
    # FIX: the original swapped the start/end names of the max-drawdown
    # window and divided by the trough instead of the peak.
    max_dd_end = np.argmax(np.maximum.accumulate(active_cum) - active_cum)   # trough (end of drawdown)
    max_dd_start = np.argmax(active_cum[:max_dd_end]) if max_dd_end > 0 else 0  # peak before the trough
    max_dd = (active_cum[max_dd_start] - active_cum[max_dd_end]) / active_cum[max_dd_start]
    performance_metrics = dict()
    performance_metrics['Annual Return (%)'] =\
        100 * (np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1)
    # Annualized with 242 trading days per year.
    performance_metrics['Annual Volatility (%)'] =\
        100 * (df_returns.loc[:, 'active'].std() * np.sqrt(242))
    performance_metrics['Sharpe Ratio'] = (performance_metrics['Annual Return (%)']
                                           / performance_metrics['Annual Volatility (%)'])
    risk_metrics = dict()
    # FIX: beta is cov(bench, strat) / var(bench); the original stored the
    # correlation coefficient under the 'Beta' key.
    cov = np.cov(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])
    risk_metrics['Beta'] = cov[0, 1] / cov[0, 0]
    risk_metrics['Maximum Drawdown (%)'] = max_dd * 100
    risk_metrics['Maximum Drawdown start'] = df_returns.index[max_dd_start]
    risk_metrics['Maximum Drawdown end'] = df_returns.index[max_dd_end]
    return performance_metrics, risk_metrics, df_returns
# NOTE(review): the triple-quoted block below is dead code kept as a no-op
# string literal; left untouched here.
'''
df_return = (df_nav_curr['nav_accumulated'] / df_nav_start['nav_adjusted']) - 1
df_return = df_return.sort_values().dropna()
fig, ax1 = plt.subplots(1, 1, figsize=(10, 4))
ax1.hist(df_return, bins=200)
ax1.set(xlabel='Return', ylabel='Number', title='Return List')
fig.show()
# 统计指标
print(df_return.describe())
# 中位数
np.median(df_return)
print(len(df_return))
print(df_return.tail(20))
pass
'''
# --- Script: rank enhanced index funds by the Sharpe ratio of their active return ---
api = DataApi('tcp://data.tushare.org:8910')
# NOTE(review): placeholder credentials — replace with a real phone number and token.
username = "phone"
password = "token"
df, msg = api.login(username, password)
print(df, msg)
# All active equity funds of the "enhanced index" style.
df = get_fundlist(api, u'股票型', u'增强指数型')
symbols = ",".join(df['symbol'])
start_date = 20161230
curr_date = 20171215
# Map each fund to the index it tracks.
index_map = get_index_map(api, symbols, start_date)
print(index_map)
indicators = list()
for (symbol, index) in index_map.items():
    performance_metrics, risk_metrics, df_returns = cal_active_return(api, symbol, index, start_date, curr_date)
    # Skip funds with missing NAV or index data.
    if performance_metrics is None:
        continue
    indicators.append((symbol,
                       index,
                       performance_metrics['Annual Return (%)'],
                       performance_metrics['Annual Volatility (%)'],
                       performance_metrics['Sharpe Ratio'],
                       df_returns['strat_cum'].iat[-1],
                       df_returns['bench_cum'].iat[-1],
                       risk_metrics['Beta'],
                       risk_metrics['Maximum Drawdown (%)'],
                       risk_metrics['Maximum Drawdown start'],
                       risk_metrics['Maximum Drawdown end'])
                      )
labels = ['symbol', 'index', 'AnnualReturn', 'AnnualVolatility', 'SharpeRatio', 'StratCumReturn', 'BenchCumReturn', 'Beta', 'MaximumDrawdown', 'MaximumDrawdownStart', 'MaximumDrawdownEnd']
df = pd.DataFrame.from_records(indicators, columns=labels)
# Sorted ascending: the best funds appear at the bottom of the table.
df = df.sort_values('SharpeRatio')
df
df.sort_values('AnnualReturn')
df.describe()
```
| github_jupyter |
### Subprocesses
One of the biggest strengths of Python is that it can be used as a *glue* language. <br>
It can 'glue' together a series of programs into a flexible and highly extensible pipeline.
### Why subprocesses
One of the most common, yet complicated, tasks that most programming languages need to do is creating new processes. <br>
This could be as simple as seeing what files are present in the current working directory (`ls`) or as complicated as creating a program workflow that *pipes* output from one program into another program's input. <br/><br/>
Many such tasks are easily taken care of through the use of Python libraries and modules (`import`) that *wrap* the programs into Python code, effectively creating Application Programming Interfaces (API). <br/><br/>
However, there are many use cases that require the user to make calls to the terminal from ***within*** a Python program.
#### Operating System Conundrum
As many in this class have found out, while Python can be installed on most operating systems; doing the same thing in one operating system (Unix) may not always yield the same results in another (Windows).<br/><br/>
The very first step to making a program **"OS-agnostic"** is through the use of the `os` module.
```
import os
```
https://docs.python.org/3/library/os.html
```
#dir(os)
help(os.getcwd)
os.getcwd()
help(os.chdir)
# The name of the operating system dependent module imported.
# The following names have currently been registered: 'posix', 'nt', 'java'
# Portable Operating System Interface - IEEE standard designed to facilitate application portability
# (Windows) New Technology - a 32-bit operating system that supports preemptive multitasking
#
os.name
# A list of strings that specifies the search path for modules.
import sys
sys.path
# A mapping object that contains environment variables and their values.
os.environ
# A mapping object representing the string environment.
# NOTE(review): 'HOME' is not set on Windows (use 'USERPROFILE' there),
# so this line raises KeyError on that platform.
print(os.environ['HOME'])
#Return the value of the environment variable key if it exists,
#or default if it doesn’t. key, default and the result are str.
print(os.getenv("HOME"))
print(os.getenv("PATH"))
# Returns the list of directories that will be searched for a named executable,
#similar to a shell, when launching a process.
# env, when specified, should be an environment variable dictionary to lookup the PATH in.
# By default, when env is None, environ is used.
os.get_exec_path()
```
The `os` module wraps OS-specific operations into a set of standardized commands. <br>
For instance, the Linux end-of-line (EOL) character is a `\n`, but `\r\n` in Windows. <br>
In Python, we can just use the following:
```
# EOL - for the current (detected) environment
'''
The string used to separate (or, rather, terminate) lines on the current platform.
This may be a single character, such as '\n' for POSIX, or multiple characters,
for example, '\r\n' for Windows.
Do not use os.linesep as a line terminator when writing files opened in text mode (the default);
use a single '\n' instead, on all platforms.
'''
# Platform-specific line-separator string.
os.linesep
```
Another example, in a Linux environment, one must use the following command to list the contents of a given directory:
```
ls -alh
```
In Windows, the equivalent is as follows:
```
dir
```
Python allows users to do a single command, in spite of the OS:
```
# List directory contents — OS-agnostic replacement for `ls` / `dir`.
# NOTE(review): assumes a "ProjectCM" directory exists under the cwd.
os.listdir("ProjectCM")
```
However, the biggest issue for creating an OS-agnostic program is ***paths*** <br/>
Windows: `"C:\\Users\\MDS\\Documents"`<br/>
Linux: `/mnt/c/Users/MDS/Documents/`<br/><br/>
Enter Python:
```
# Build a platform-independent path rooted at the working directory.
pwd = os.getcwd()
print(pwd)
parent = os.path.dirname(pwd)
print(parent)
os.path.join(pwd, "ProjectCM", "demoCM", "test.py")
```
### `subprocess`
If you Google anything on how to run shell commands, but don't specify Python 3.x, you will likely get an answer that includes `popen`, `popen2`, or `popen3`. These were the most prolific ways to *open* a new *p*rocess. In Python 3.x, they encapsulated these functions into a new one called `run` available through the `subprocess` library.
```
# Import and alias
import subprocess as sp
```
#### `check_output`
```
help(sp.check_output)
# check_output returns a bytestring by default, so I set encoding to convert it to strings.
# [command, command line arguments]
# change from bytes to string using encoding
sp.check_output(["echo","test"],encoding='utf_8')
sp.check_output("echo test",encoding='utf_8', shell = True)
# demonstration, might not work if test.py does not have the parsing code
sp.check_output([os.path.join(pwd,"test.py"),"[1,2,3]"],encoding='utf_8')
```
The first thing we will look at is a set of trivial examples that demonstrate capturing the *output* (stdout) of a program.
However, while the `check_output` function is still in the `subprocess` module, it can easily be converted into a more specific and/or flexible `run` function signature.
#### `run`
```
help(sp.run)
sub = sp.run(
[
'echo', # The command we want to run
'test' # Arguments for the command
],
encoding='utf_8', # Converting byte code
stdout=sp.PIPE, # Where to send the output
check=True # Whether to raise an error if the process fails
)
sub
sub = sp.run(
'echo test', # The command we want to run
encoding='utf_8', # Converting byte code
stdout=sp.PIPE, # Where to send the output
check=True , # Whether to raise an error if the process fails
shell = True
)
sub
[elem for elem in dir(sub) if not elem.startswith("__")]
print(sub.stdout)
```
The main utility of `check_output` was to capture the output (stdout) of a program. <br>
By using the `stdout=subprocess.PIPE` argument, the output can easily be captured, along with its return code. <br>
A return code signifies the program's exit status: 0 for success, anything else otherwise
```
sub.returncode
```
With our `run` code above, our program ran to completion, exiting with status 0. The next example shows a different status.
```
sp.run(
'exit 1', # Command & arguments
shell = True # Run from the shell
)
```
However, if the `check=True` argument is used, it will raise a `CalledProcessError` if your program exits with anything different than 0. This is helpful for detecting a pipeline failure, and exiting or correcting before attempting to continue computation.
```
sp.run(
'exit 1', # Command & arguments
shell = True, # Run from the shell
check = True # Check exit status
)
sub = sp.run(
'exit 1', # Command & arguments
shell = True, # Run from the shell
# check = True # Check exit status
)
if (sub.returncode != 0):
print(f"Exit code {sub.returncode}. Expected 0 when there is no error.")
```
#### Syntax when using `run`:
1. A list of arguments: `subprocess.run(['echo', 'test', ...], ...)`
2. A string and `shell`: `subprocess.run('exit 1', shell = True, ...)`
The preferred way of using `run` is the first way. <br>
This preference is mainly due to security purposes (to prevent shell injection attacks). <br>
It also allows the module to take care of any required escaping and quoting of arguments for a pseudo-OS-agnostic approach.
There are some guidelines though:
1. Sequence (list) of arguments is generally preferred
2. A str is appropriate if the user is just calling a program with no arguments
3. The user should use a str to pass argument if `shell` is `True`<br/>
Your next question should be, "What is `shell`?"
`shell` is just your terminal/command prompt. This is the environment where you call `ls/dir` in. It is also where users can define variables. More importantly, this is where your *environmental variables* are set...like `PATH`.<br/><br/>
By using `shell = True`, the user can now use shell-based environmental variable expansion from within a Python program.
```
sp.run(
'echo $PATH', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
) # Look at the output
p1 = sp.run(
'sleep 5; echo done1', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p1)
p2 = sp.run(
'echo done2', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p2)
```
For the most part, you shouldn't need to use `shell` simply because Python has modules in the standard library that can do most of the shell commands. For example `mkdir` can be done with `os.mkdir()`, and `$PATH` can be retrieved using os.getenv("PATH") or os.get_exec_path() as shown above.
#### Blocking vs Non-blocking
The last topic of this lecture is "blocking". This is computer science lingo/jargon for whether or not a program ***waits*** until something is complete before moving on. Think of this like a really bad website that takes forever to load because it is waiting until it has rendered all its images first, versus the website that sets the formatting and text while it works on the images.
1. `subprocess.run()` is blocking (it waits until the process is complete)
2. `subprocess.Popen()` is non-blocking (it will run the command, then move on)
***Most*** use cases can be handled through the use of `run()`.<br>
`run()` is just a *wrapped* version of `Popen()` that simplifies use. <br>
However, `Popen()` allows the user a more flexible control of the subprocess call. <br>
`Popen()` can be used similar way as run (with more optional parameters).
An example use case for `Popen()` is if the user has some intermediate data that needs to get processed, but the output of that data doesn't necessarily affect the rest of the pipeline.
#### `Popen`
```
p1 = sp.Popen(
'sleep 5; echo done1', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p1)
p2 = sp.Popen(
'echo done2', # Command
shell = True, # Use the shell
stdout=sp.PIPE, # Where to send it
encoding='utf_8' # Convert from bytes to string
)
print(p2)
print("processes ran")
print(p1.stdout.read())
print(p2.stdout.read())
print("processes completed")
# Use context manager to handle process while it is running,
# and gracefully close it
with sp.Popen(
[
'echo', # Command
'here we are' # Command line arguments
],
encoding='utf_8', # Convert from byte to string
stdout=sp.PIPE # Where to send it
) as proc: # Enclose and alias the context manager
print(
proc.stdout.read() # Look at the output
)
for elem in dir(proc):
if not elem.startswith('_'):
print(elem)
```
#### ***NOTE***: From here on out, there might be different commands used for **Linux** / **MacOS** or **Windows**
```
#test_pipe.txt - a file to be used to demonstrate pipe of cat and sort
!echo testing > test_pipe.txt
!echo the >> test_pipe.txt
!echo subprocess >> test_pipe.txt
!echo pipe >> test_pipe.txt
# mac OS
p1 = sp.Popen(['cat','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
# windows OS
# p1 = sp.Popen(['type','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
print(p1.stdout.read())
# mac OS
p1 = sp.Popen(['cat','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
# windows OS
# p1 = sp.Popen(['type','test_pipe.txt'], stdout=sp.PIPE, encoding='utf_8')
p2 = sp.Popen(['sort'], stdin=p1.stdout, stdout=sp.PIPE, encoding='utf_8')
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits
output = p2.communicate()[0]
print(output)
```
`Popen` can create background processes, shell-background-like behavior means not blocking. <br>
`Popen` has a lot more functionality than `run`.
```
sub_popen = sp.Popen(
[
'echo', # Command
'test', # Command line arguments
],
encoding='utf_8', # Convert from byte to string
stdout=sp.PIPE # Where to send it
)
for j in dir(sub_popen):
if not j.startswith('_'):
print(j)
# sub - returned by run
for j in dir(sub):
if not j.startswith('_'):
print(j)
sub_popen.kill() # Close the process
```
Example creating child process.<br>
https://pymotw.com/3/subprocess/
A collection of `Popen` examples: <br>
https://www.programcreek.com/python/example/50/subprocess.Popen
## SQL
#### What is a database?
* Is an organized collection of data (files)
* A way to store and retrieve that information
* A relational database is structured to recognize relations between the data elements
E.g. NCBI Gene <br>
https://www.ncbi.nlm.nih.gov/gene/statistics
https://www.researchgate.net/profile/Adam_Richards3/publication/282134102/figure/fig3/AS:289128232046602@1445944950296/Database-entity-diagram-Data-collected-from-NCBI-the-Gene-Ontology-and-UniProt-are.png
<img src = "https://www.researchgate.net/profile/Adam_Richards3/publication/282134102/figure/fig3/AS:289128232046602@1445944950296/Database-entity-diagram-Data-collected-from-NCBI-the-Gene-Ontology-and-UniProt-are.png" width = "700"/>
#### More database examples:
* The Python dictionary qualifies
* A spreadsheet is a type of database – a table
* A fasta file could be considered a database
#### Why use databases?
* Databases can handle very large data sets
* Databases scale well
* Databases are concurrent
* Databases are fault-tolerant
* Your data has a built-in structure to it
* Information of a given type is typically stored only once
* You can query the data in a database and easily create meaningful reports
* You can relate data from different tables
#### What is the Structured Query Language (SQL) ?
* SQL is the standard language for relational database management systems (ANSI)
* SQL is used to communicate with a database
* SQL can be used to: add, remove, modify, request data
* SQL is a declarative language - you describe what you want
#### Relational Database Management Systems
* Software programs such as Oracle, MySQL, SQLServer, DB2, postgreSQL are the backbone on which a specific database can be built
* They are called RDBMS (relational database management systems)
* They handle the data storage, indexing, logging, tracking and security
* They have a very fine-grained way of granting permissions to users at the level of commands that may be used
* Create a database
* Create a table
* Update or insert data
* View certain tables ... and many more
* An important part of learning databases is to understand the type of data which is stored in columns and rows.
* Likewise when we get to the database design section, it is critically important to know what type of data you will be modeling and storing (and roughly how much, in traditional systems)
* Exactly which types are available depends on the database system
#### SQLite
* SQLite is a software library that implements a self-contained, serverless, zero-configuration, embedded high-reliability, full-featured, public-domain SQL database engine. SQLite is the most widely deployed database engine in the world (https://sqlite.org/)
* A SQLite database is a single file that is transportable
* Check-out bioconductor (annotation) packages that come with sqlite databases
* hgu133a.db
* https://bioconductor.org/packages/release/data/annotation/html/hgu133a.db.html
* org.Hs.eg.db - Genome wide annotation for Human, primarily based on mapping using Entrez Gene identifiers
* https://bioconductor.org/packages/release/data/annotation/html/org.Hs.eg.db.html
##### SQLite uses a greatly simplified set of data types:
* INTEGER - numeric
* REAL - numeric
* TEXT – text of any length
* Dates are held as text
* BLOB – binary large objects
* Such as images
```
from sqlite3 import connect
# the file org.Hs.eg.sqlite should be in the datasets folder
# if you pulled the info from the class github repo
# otherwise retrieve from the class github repo or canvas
conn = connect('../datasets/org.Hs.eg.sqlite')
curs = conn.cursor()
# close cursor and connection
curs.close()
conn.close()
conn = connect('../datasets/org.Hs.eg.sqlite')
curs = conn.cursor()
```
There is a special sqlite_master table that describes the contents of the database
Major SQL commands: SELECT, INSERT, DELETE, UPDATE
#### SELECT - Retrieves data from one or more tables and doesn’t change the data at all
* SELECT * (means all columns), or the comma separated names of the columns of data you wish to return
* They will return (left to right) in the order received.
* FROM is the table source or sources (comma separated)
* WHERE (optional) is the predicate clause: conditions for the query
* Evaluates to True or False for each row
* This clause almost always includes Column-Value pairs.
* Omitting the Where clause returns ALL the records in that table.
* Note: the match is case sensitive
* ORDER BY (optional) indicates a sort order for the output data
* default is row_id, which can be very non-intuitive
* ASCending or DESCending can be appended to change the sort order. (ASC is default)
* In most SQL clients, the ";" indicates the end of a statement and requests execution
SELECT - which columns to include in the result, use * for all columns <br>
FROM - which tables to use <br>
WHERE (optional) - predicate clause, which rows to include
'*' selects ALL rows and ALL columns and returns them by column order and row_id
```
sql = '''SELECT * FROM sqlite_master;'''
x = curs.execute(sql)
```
See result header
```
dir(x)
curs.description
```
See result
```
for row in curs: print(row)
```
WHERE clause example
```
sql = '''
SELECT name,type
FROM sqlite_master
WHERE type= "table";
'''
curs.execute(sql)
for row in curs: print(row)
def get_header(cursor):
    '''Make a tab-delimited header row from the cursor description.

    Arguments:
        cursor: a cursor on which a SELECT query has been executed
    Returns:
        string: the column names separated by tabs, with no trailing newline
    '''
    # cursor.description is a sequence of 7-tuples (per DB-API 2.0);
    # the column name is always the first element of each tuple.
    return '\t'.join([column[0] for column in cursor.description])
print(get_header(curs))
sql = '''
SELECT *
FROM go_bp LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
```
http://geneontology.org/docs/guide-go-evidence-codes/
* Inferred from Experiment (EXP)
* Inferred from Direct Assay (IDA)
* Inferred from Physical Interaction (IPI)
* Inferred from Mutant Phenotype (IMP)
* Inferred from Genetic Interaction (IGI)
* Inferred from Expression Pattern (IEP)
Aliasing column names to make them easier to understand
```
sql = '''
SELECT * FROM gene_info LIMIT 5;
'''
curs.execute(sql)
for i in curs.description: print(i[0])
for row in curs: print(row)
sql = '''
SELECT _id 'Gene Identifier', symbol "Gene Symbol"
FROM gene_info LIMIT 5;
'''
curs.execute(sql)
curs.description
curs.fetchall()
sql = '''
SELECT _id 'ID', symbol "Symbol"
FROM gene_info LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
#select all from go_bp
sql = '''
SELECT *
FROM go_bp
WHERE go_id = 'GO:0002576'
LIMIT 5'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
```
http://geneontology.org/docs/guide-go-evidence-codes/
* Inferred from Experiment (EXP)
* Inferred from Direct Assay (IDA)
* Inferred from Physical Interaction (IPI)
* Inferred from Mutant Phenotype (IMP)
* Inferred from Genetic Interaction (IGI)
* Inferred from Expression Pattern (IEP)
* Inferred from High Throughput Experiment (HTP)
* Inferred from High Throughput Direct Assay (HDA)
* Inferred from High Throughput Mutant Phenotype (HMP)
* Inferred from High Throughput Genetic Interaction (HGI)
* Inferred from High Throughput Expression Pattern (HEP)
* Inferred from Biological aspect of Ancestor (IBA)
* Inferred from Biological aspect of Descendant (IBD)
* Inferred from Key Residues (IKR)
* Inferred from Rapid Divergence (IRD)
* Inferred from Sequence or structural Similarity (ISS)
* Inferred from Sequence Orthology (ISO)
* Inferred from Sequence Alignment (ISA)
* Inferred from Sequence Model (ISM)
* Inferred from Genomic Context (IGC)
* Inferred from Reviewed Computational Analysis (RCA)
* Traceable Author Statement (TAS)
* Non-traceable Author Statement (NAS)
* Inferred by Curator (IC)
* No biological Data available (ND)
* Inferred from Electronic Annotation (IEA)
SELECT - which columns to include in the result <br>
FROM - which tables to use <br>
WHERE (optional) - predicate clause, which rows to include <br>
ORDER BY (optional) - indicates a sort order for the output data
```
sql = '''
SELECT _id, go_id
FROM go_bp
WHERE evidence="ND"
ORDER BY _id DESC
LIMIT 20;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
#curs.fetchall()
#for row in curs: print(row)
```
COUNT returns a single number, which is the count of all rows in the table
```
sql = '''
SELECT count(*) FROM genes;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT count(_id) AS 'Number of genes'
FROM genes;
'''
curs.execute(sql)
print(get_header(curs))
curs.fetchall()[0][0]
```
DISTINCT selects non-duplicated elements (rows)
```
sql = '''
SELECT _id FROM go_bp LIMIT 20;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT DISTINCT _id FROM go_bp LIMIT 10;
'''
curs.execute(sql)
curs.fetchall()
#count the number of rows on go_bp
sql = '''
SELECT count(*)
FROM go_bp;
'''
curs.execute(sql)
result = curs.fetchall()
result[0][0]
#len(result)
sql = '''
SELECT DISTINCT _id FROM go_bp;
'''
curs.execute(sql)
result = curs.fetchall()
len(result)
```
WHERE clause operators <br>
https://www.sqlite.org/lang_expr.html
<> , != inequality <br>
< less than <br>
<= less than or equal <br>
= equal <br>
'> greater than <br>
'>= greater than or equal <br>
BETWEEN v1 AND v2 tests that a value lies in a given range <br>
EXISTS test for existence of rows matching query <br>
IN tests if a value falls within a given set or query <br>
IS [ NOT ] NULL is or is not null <br>
[ NOT ] LIKE tests value to see if like or not like another <br>
% is the wildcard in SQL, used in conjunction with LIKE
```
sql = '''
SELECT * FROM go_bp
WHERE _id = '1';
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT * FROM go_bp
WHERE _id IN (1,5,7);
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT * FROM go_bp
WHERE evidence = 'ND' AND _id BETWEEN 20 AND 2000
LIMIT 10
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
sql = '''
SELECT *
FROM go_bp
WHERE go_id LIKE '%0081%'
LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
# Retrieve rows from go_bp where the go_id is GO:0008104 and evidence is IEA or IDA
```
Sqlite3 also has some PRAGMA methods <br>
SQL extension specific to SQLite and used to modify the operation of the SQLite library or to query the SQLite library for internal (non-table) data <br>
https://www.sqlite.org/pragma.html <br>
The code below shows how to get the schema (columns and columns information)
```
sql = 'PRAGMA table_info("go_bp")'
curs.execute(sql)
curs.fetchall()
sql = '''SELECT * FROM pragma_table_info("go_bp") '''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT _id, symbol, gene_name
FROM gene_info
WHERE _id IN
(SELECT DISTINCT _id
FROM go_bp
WHERE go_id == 'GO:0008104');
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
```
##### GROUP BY groups by a column and creates summary data for a different column
```
sql = '''
SELECT go_id, count(*) FROM go_bp GROUP BY go_id LIMIT 10;
'''
curs.execute(sql)
curs.fetchall()
sql = '''
SELECT go_id, count(_id) as gene_no FROM go_bp GROUP BY go_id LIMIT 10;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
```
##### HAVING allows restrictions on the rows used or selected
```
sql = '''
SELECT go_id, count(_id) as gene_no FROM go_bp GROUP BY go_id
HAVING gene_no>500;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
# Select gene ids with more than 100 biological processes associated
```
#### See the create table statement
```
sql = '''
SELECT name,sql
FROM sqlite_master
WHERE type= "table" and name == "go_bp"
LIMIT 2;
'''
curs.execute(sql)
print(get_header(curs))
for row in curs.fetchall():
print('\t'.join([str(elem) for elem in row ]))
print(row[1])
curs.close()
conn.close()
```
| github_jupyter |
# Greybox Fuzzing with Grammars
<!--
Previously, we have learned about [mutational fuzzing](GreyboxFuzzer.ipynb), which generates new inputs by mutating seed inputs. Most mutational fuzzers represent inputs as a sequence of bytes and apply byte-level mutations to this byte sequence. Such byte-level mutations work great for compact file formats with a small number of structural constraints. However, most file formats impose a high-level structure on these byte sequences.
Common components of a regular file are file header, data chunks, checksums, data fields, and meta data. Only if this file structure is correctly reflected will the file be accepted by the parser. Otherwise, the file is quickly rejected before reaching interesting parts in the program. It is not easy to generate valid files by [random fuzzing](Fuzzer.ipynb). For instance, only a tiniest proportion of random strings are valid PDF files or valid JPEG image files.
-->
<!--
Maybe we can start with a valid file and generate new valid files by small mutations applied to the original file? Indeed, this is the main insight of ([blackbox](MutationFuzzer.ipynb) and [greybox](GreyboxFuzzer.ipynb)) mutational fuzzing. However, many file formats are so complex that even small modifications lead to invalid inputs that are quickly rejected by the parser.
-->
In this chapter, we introduce two important extensions to our syntactic fuzzing techniques:
1. We show how to combine [parsing](Parser.ipynb) and [fuzzing](GrammarFuzzer.ipynb) with grammars. This allows us to _mutate_ existing inputs while preserving syntactical correctness, and to _reuse_ fragments from existing inputs while generating new ones. The combination of parsing and fuzzing, as demonstrated in this chapter, has been highly successful in practice: The _LangFuzz_ fuzzer for JavaScript has found more than 2,600 bugs in JavaScript interpreters this way.
2. In the previous chapters, we have used grammars in a _black-box_ manner – that is, we have used them to generate inputs regardless of the program being tested. In this chapter, we introduce mutational _greybox fuzzing with grammars_: Techniques that make use of _feedback from the program under test_ to guide test generations towards specific goals. As in [lexical greybox fuzzing](GreyboxFuzzer.ipynb), this feedback is mostly _coverage_, allowing us to direct grammar-based testing towards uncovered code parts.
<!--
In this chapter, we encode file formats as [grammars](Grammars.ipynb) and make the mutational fuzzer input-structure-aware. We investigate opportunities to inform the fuzzer about the validity of the generated inputs. Specifically, we explore dictionaries, grammars, structural mutators, and validity-based power schedules
-->
**Prerequisites**
* We build on several concepts from [the chapter on greybox fuzzing (without grammars)](GreyboxFuzzer.ipynb).
* As the title suggests, you should know how to fuzz with grammars [from the chapter on grammars](Grammars.ipynb).
## Background
First, we [recall](GreyboxFuzzer.ipynb#Ingredients-for-Greybox-Fuzzing) a few basic ingredients for mutational fuzzers.
* **Seed**. A _seed_ is an input that is used by the fuzzer to generate new inputs by applying a sequence of mutations.
* **Mutator**. A _mutator_ implements a set of mutation operators that applied to an input produce a slightly modified input.
* **PowerSchedule**. A _power schedule_ assigns _energy_ to a seed. A seed with higher energy is fuzzed more often throughout the fuzzing campaign.
* **MutationFuzzer**. Our _mutational blackbox fuzzer_ generates inputs by mutating seeds in an initial population of inputs.
* **GreyboxFuzzer**. Our _greybox fuzzer_ dynamically adds inputs to the population of seeds that increased coverage.
* **FunctionCoverageRunner**. Our _function coverage runner_ collects coverage information for the execution of a given Python function.
Let's try to get a feeling for these concepts.
```
import fuzzingbook_utils
from GreyboxFuzzer import Mutator, Seed, PowerSchedule, MutationFuzzer, GreyboxFuzzer
from MutationFuzzer import FunctionCoverageRunner
```
The following command applies a mutation to the input "Hello World".
```
Mutator().mutate("Hello World")
```
The default power schedule assigns energy uniformly across all seeds. Let's check whether this works.
We choose 10k times from a population of three seeds. As we see in the `hits` counter, each seed is chosen about a third of the time.
```
population = [Seed("A"), Seed("B"), Seed("C")]
schedule = PowerSchedule()
hits = {
"A" : 0,
"B" : 0,
"C" : 0
}
for i in range(10000):
seed = schedule.choose(population)
hits[seed.data] += 1
hits
```
Before explaining the function coverage runner, lets import Python's HTML parser as example...
```
from html.parser import HTMLParser
```
... and create a _wrapper function_ that passes each input into a new parser object.
```
def my_parser(inp):
    """Feed the input string `inp` into a fresh HTMLParser instance.

    A new parser object is created per call so that no state leaks
    between executions.
    """
    html_parser = HTMLParser()
    html_parser.feed(inp)
```
The `FunctionCoverageRunner` constructor takes a Python `function` to execute. The function `run()` takes an input, passes it on to the Python `function`, and collects the coverage information for this execution. The function `coverage()` returns a list of tuples `(function name, line number)` for each statement that has been covered in the Python `function`.
```
runner = FunctionCoverageRunner(my_parser)
runner.run("Hello World")
cov = runner.coverage()
list(cov)[:5] # Print 5 statements covered in HTMLParser
```
Our greybox fuzzer takes a seed population, mutator, and power schedule. Let's generate 5000 fuzz inputs starting with an "empty" seed corpus.
```
import time
import random
n = 5000
seed_input = " " # empty seed
runner = FunctionCoverageRunner(my_parser)
fuzzer = GreyboxFuzzer([seed_input], Mutator(), PowerSchedule())
start = time.time()
fuzzer.runs(runner, trials=n)
end = time.time()
"It took the fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)
"During this fuzzing campaign, we covered %d statements." % len(runner.coverage())
```
## Building a Keyword Dictionary
To fuzz our HTML parser, it may be useful to inform a mutational fuzzer about important keywords in the input – that is, important HTML keywords. To this end, we extend our mutator to consider keywords from a _dictionary_.
```
class DictMutator(Mutator):
    """A Mutator extended with a keyword dictionary.

    In addition to the inherited mutation operators, it can splice a
    keyword from the dictionary into the input at a random position.
    """

    def __init__(self, dictionary):
        """Constructor. `dictionary` is the list of keywords to insert."""
        super().__init__()
        self.dictionary = dictionary
        self.mutators.append(self.insert_from_dictionary)

    def insert_from_dictionary(self, s):
        """Return `s` with a random dictionary keyword inserted at a random position."""
        # randint's upper bound is inclusive, so insertion at the very
        # end of `s` is possible as well.
        insert_at = random.randint(0, len(s))
        keyword = random.choice(self.dictionary)
        return s[:insert_at] + keyword + s[insert_at:]
```
Let's try to add a few HTML tags and attributes and see whether the coverage with `DictMutator` increases.
```
runner = FunctionCoverageRunner(my_parser)
dict_mutator = DictMutator(["<a>","</a>","<a/>", "='a'"])
dict_fuzzer = GreyboxFuzzer([seed_input], dict_mutator, PowerSchedule())
start = time.time()
dict_fuzzer.runs(runner, trials = n)
end = time.time()
"It took the fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)
```
Clearly, it takes longer. In our experience, this means more code is covered:
```
"During this fuzzing campaign, we covered %d statements." % len(runner.coverage())
```
How do the fuzzers compare in terms of coverage over time?
```
from Coverage import population_coverage
import matplotlib.pyplot as plt
_, dict_cov = population_coverage(dict_fuzzer.inputs, my_parser)
_, fuzz_cov = population_coverage(fuzzer.inputs, my_parser)
line_dict, = plt.plot(dict_cov, label="With Dictionary")
line_fuzz, = plt.plot(fuzz_cov, label="Without Dictionary")
plt.legend(handles=[line_dict, line_fuzz])
plt.xlim(0,n)
plt.title('Coverage over time')
plt.xlabel('# of inputs')
plt.ylabel('lines covered');
```
<!-- \todo{Andreas: Section on mining keywords using parser-directed fuzzing or AUTOGRAM?} -->
***Summary.*** Informing the fuzzer about important keywords already goes a long way towards achieving lots of coverage quickly.
***Try it.*** Open this chapter as Jupyter notebook and add other HTML-related keywords to the dictionary in order to see whether the difference in coverage actually increases (given the same budget of 5k generated test inputs).
***Read up.*** Michał Zalewski, author of AFL, wrote several great blog posts on [making up grammars with a dictionary in hand](https://lcamtuf.blogspot.com/2015/01/afl-fuzz-making-up-grammar-with.html) and [pulling JPEGs out of thin air](https://lcamtuf.blogspot.com/2014/11/pulling-jpegs-out-of-thin-air.html)!
## Fuzzing with Input Fragments
While dictionaries are helpful to inject important keywords into seed inputs, they do not allow us to maintain the structural integrity of the generated inputs. Instead, we need to make the fuzzer aware of the _input structure_. We can do this using [grammars](Grammars.ipynb). Our first approach
1. [parses](Parser.ipynb) the seed inputs,
2. disassembles them into input fragments, and
3. generates new files by reassembling these fragments according to the rules of the grammar.
This combination of _parsing_ and _fuzzing_ can be very powerful, as we will see in an instant
### Parsing and Recombining JavaScript, or How to Make 50,000 USD in Four Weeks
In "Fuzzing with Code Fragments" \cite{Holler2012}, Holler, Herzig, and Zeller apply these steps to fuzz a JavaScript interpreter. They use a JavaScript grammar to
1. _parse_ (valid) JavaScript inputs into parse trees,
2. _disassemble_ them into fragments (subtrees),
3. _recombine_ these fragments into valid JavaScript programs again, and
4. _feed_ these programs into a JavaScript interpreter for execution.
As in most fuzzing scenarios, the aim is to cause the JavaScript interpreter to crash. Here is an example of LangFuzz-generated JavaScript code (from \cite{Holler2012}) that caused a crash in the Mozilla JavaScript interpreter:
```javascript
var haystack = "foo";
var re_text = "^foo";
haystack += "x";
re_text += "(x)";
var re = new RegExp(re_text);
re.test(haystack);
RegExp.input = Number();
print(RegExp.$1);
```
From a crash of the JavaScript interpreter, it is frequently possible to construct an *exploit* that will not only crash the interpreter, but instead have it execute code under the attacker's control. Therefore, such crashes are serious flaws, which is why you get a bug bounty if you report them.
In the first four weeks of running his _LangFuzz_ tool, Christian Holler, first author of that paper, netted _more than USD 50,000 in bug bounties_. To date, LangFuzz has found more than 2,600 bugs in the JavaScript browsers of Mozilla Firefox, Google Chrome, and Microsoft Edge. If you use any of these browsers (say, on your Android phone), the combination of parsing and fuzzing has contributed significantly in making your browsing session secure.
(Note that these are the same Holler and Zeller who are co-authors of this book. If you ever wondered why we devote a couple of chapters on grammar-based fuzzing, that's because we have had some great experience with it.)
### Parsing and Recombining HTML
In this book, let us stay with HTML input for a while. To generate valid HTML inputs for our Python `HTMLParser`, we should first define a simple grammar. It allows to define HTML tags with attributes. Our context-free grammar does not require that opening and closing tags must match. However, we will see that such context-sensitive features can be maintained in the derived input fragments, and thus in the generated inputs.
```
import string
from Grammars import is_valid_grammar, srange
# Symbols the Earley parser treats as opaque tokens (not decomposed further
# during lexing/parsing).
XML_TOKENS = {"<id>","<text>"}

# Context-free grammar for a simplified XML/HTML dialect.  Note that it does
# NOT require opening and closing tags to match -- tag matching is a
# context-sensitive property that is preserved only through reused fragments.
XML_GRAMMAR = {
    "<start>": ["<xml-tree>"],
    # A tree is text, an open/close tag pair with a subtree,
    # a self-closing tag, or a sequence of trees.
    "<xml-tree>": ["<text>",
                   "<xml-open-tag><xml-tree><xml-close-tag>",
                   "<xml-openclose-tag>",
                   "<xml-tree><xml-tree>"],
    "<xml-open-tag>": ["<<id>>", "<<id> <xml-attribute>>"],
    "<xml-openclose-tag>": ["<<id>/>", "<<id> <xml-attribute>/>"],
    "<xml-close-tag>": ["</<id>>"],
    "<xml-attribute>" : ["<id>=<id>", "<xml-attribute> <xml-attribute>"],
    "<id>": ["<letter>", "<id><letter>"],
    "<text>" : ["<text><letter_space>","<letter_space>"],
    "<letter>": srange(string.ascii_letters + string.digits +"\""+"'"+"."),
    "<letter_space>": srange(string.ascii_letters + string.digits +"\""+"'"+" "+"\t"),
}

# Sanity check that every used symbol is defined (helper from Grammars).
assert is_valid_grammar(XML_GRAMMAR)
```
In order to parse an input into a derivation tree, we use the [Earley parser](Parser.ipynb#Parsing-Context-Free-Grammars).
```
from Parser import EarleyParser
from GrammarFuzzer import display_tree
```
Let's run the parser on a simple HTML input and display all possible parse trees. A *parse tree* represents the input structure according to the given grammar.
```
# Parse a small HTML input and render every derivation tree that the
# (possibly ambiguous) grammar admits.
parser = EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS)
for tree in parser.parse("<html>Text</html>"):
    display_tree(tree)
```
As we can see, the input starts with an opening tag, contains some text, and ends with a closing tag. Excellent. This is a structure that we can work with.
### Building the Fragment Pool
We are now ready to implement our first input-structure-aware mutator. Let's initialize the mutator with the dictionary `fragments` representing the empty fragment pool. It contains a key for each symbol in the grammar (and the empty set as value).
```
class FragmentMutator(Mutator):
    def __init__(self, parser):
        """Initialize empty fragment pool and add parser.

        `parser` is expected to be an `EarleyParser`; `parser.cgrammar`
        is presumably its canonical grammar -- TODO confirm.  The pool
        maps each grammar symbol to a list of collected fragments.
        """
        self.parser = parser
        # One (initially empty) fragment list per grammar symbol.
        self.fragments = {k: [] for k in self.parser.cgrammar}
        # Call the base initializer last, after our own state is set up.
        super().__init__()
```
The `FragmentMutator` adds fragments recursively. A *fragment* is a subtree in the parse tree and consists of the symbol of the current node and child nodes (i.e., descendant fragments). We can exclude fragments starting with symbols that are tokens, terminals, or not part of the grammar.
```
from Parser import terminals
class FragmentMutator(FragmentMutator):
    def add_fragment(self, fragment):
        """Recursively add `fragment` and all its subtrees to the pool."""
        (symbol, children) = fragment
        if self.is_excluded(symbol):
            # Excluded symbols cut off the whole subtree.
            return
        self.fragments[symbol].append(fragment)
        for child in children:
            self.add_fragment(child)

    def is_excluded(self, symbol):
        """Return True if fragments rooted at `symbol` (and hence all
        their descendants) should be kept out of the pool: tokens,
        terminals, and symbols unknown to the grammar."""
        grammar = self.parser.grammar()
        return (symbol not in grammar
                or symbol in self.parser.tokens
                or symbol in terminals(grammar))
```
Parsing can take a long time, particularly if there is too much ambiguity during the parsing. In order to maintain the efficiency of mutational fuzzing, we will limit the parsing time to 200ms.
```
import signal
class Timeout(Exception):
    """Raised by the SIGALRM handler when a time budget is exhausted."""
    pass


def timeout(signum, frame):
    """Signal handler: convert SIGALRM into a `Timeout` exception."""
    raise Timeout()


# Register timeout() as handler for signal 'SIGALRM'
signal.signal(signal.SIGALRM, timeout);
```
The function `add_to_fragment_pool()` parses a seed (no longer than 200ms) and adds all its fragments to the fragment pool. If parsing the `seed` was successful, the attribute `seed.has_structure` is set to `True`. Otherwise, it is set to `False`.
<!-- \todo{Convert this to `ExpectTimeout` (or make ExpectTimeout more efficient)} -->
```
class FragmentMutator(FragmentMutator):
    def add_to_fragment_pool(self, seed):
        """Adds all fragments of a seed to the fragment pool.

        Parsing is limited to 200ms via SIGALRM.  On success,
        `seed.structure` holds the first derivation tree and
        `seed.has_structure` is set to True; on a parse error or
        timeout, `seed.has_structure` is set to False.
        """
        try: # only allow quick parsing of 200ms max
            signal.setitimer(signal.ITIMER_REAL, 0.2)
            # Take the *first* derivation tree of the possibly ambiguous parse.
            seed.structure = next(self.parser.parse(seed.data))
            signal.setitimer(signal.ITIMER_REAL, 0)  # cancel the timer
            self.add_fragment(seed.structure)
            seed.has_structure = True
        except (SyntaxError, Timeout):
            seed.has_structure = False
            signal.setitimer(signal.ITIMER_REAL, 0)  # ensure the timer is off
```
Let's see how `FragmentMutator` fills the fragment pool for a simple HTML seed input. We initialize mutator with the `EarleyParser` which itself is initialized with our `XML_GRAMMAR`.
```
from GrammarFuzzer import tree_to_string
# Demonstrate fragment collection: parse a small valid HTML seed and dump
# the resulting fragment pool, grouped by grammar symbol.
valid_seed = Seed("<html><header><title>Hello</title></header><body>World<br/></body></html>")
fragment_mutator = FragmentMutator(EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
fragment_mutator.add_to_fragment_pool(valid_seed)
for key in fragment_mutator.fragments:
    print(key)
    for f in fragment_mutator.fragments[key]:
        print("|-%s" % tree_to_string(f))
```
For many symbols in the grammar, we have collected a number of fragments. There are several open and closing tags and several interesting fragments starting with the `xml-tree` symbol.
***Summary***. For each interesting symbol in the grammar, the `FragmentMutator` has a set of fragments. These fragments are extracted by first parsing the inputs to be mutated.
### Fragment-Based Mutation
We can use the fragments in the fragment pool to generate new inputs. Every seed that is being mutated is disassembled into fragments, and memoized – i.e., disassembled only the first time around.
```
class FragmentMutator(FragmentMutator):
    def __init__(self, parser):
        """Initialize mutators and the list of already-processed seeds."""
        super().__init__(parser)
        self.seen_seeds = []

    def mutate(self, seed):
        """Implement structure-aware mutation. Memoize seeds.

        A seed is disassembled into fragments only the first time it is
        mutated; afterwards its fragments are already in the pool.
        """
        # Idiomatic membership test (`not in` instead of `not ... in`).
        if seed not in self.seen_seeds:
            self.seen_seeds.append(seed)
            self.add_to_fragment_pool(seed)
        return super().mutate(seed)
```
Our first structural mutation operator is `swap_fragment()`, which chooses a random fragment in the given seed and substitutes it with a random fragment from the pool. We make sure that both fragments start with the same symbol. For instance, we may swap a closing tag in the seed HTML by another closing tag from the fragment pool.
In order to choose a random fragment, the mutator counts all fragments (`n_nodes`) below the root fragment associated with the start-symbol.
```
class FragmentMutator(FragmentMutator):
    def count_nodes(self, fragment):
        """Return the number of non-excluded nodes in `fragment`."""
        symbol, children = fragment
        if self.is_excluded(symbol):
            return 0
        # Count this node plus all nodes in the subtrees.
        total = 1
        for child in children:
            total += self.count_nodes(child)
        return total
```
In order to swap the chosen fragment – identified using the "global" variable `self.to_swap` – the seed's parse tree is traversed recursively.
```
class FragmentMutator(FragmentMutator):
    def recursive_swap(self, fragment):
        """Recursively finds the fragment to swap.

        Uses the counter `self.to_swap` (set by the caller) as a
        pre-order node index: every non-excluded node decrements it,
        and the node where it reaches zero is replaced by a random pool
        fragment with the same symbol.  Excluded subtrees are returned
        unchanged.
        """
        symbol, children = fragment
        if self.is_excluded(symbol):
            return symbol, children
        self.to_swap -= 1
        if self.to_swap == 0:
            # Replace this node wholesale with a compatible pool fragment.
            return random.choice(list(self.fragments[symbol]))
        return symbol, list(map(self.recursive_swap, children))
```
Our structural mutator chooses a random number between 2 (i.e., excluding the `start` symbol) and the total number of fragments (`n_nodes`) and uses the recursive swapping to generate the new fragment. The new fragment is serialized as string and returned as new seed.
```
class FragmentMutator(FragmentMutator):
    def __init__(self, parser):
        """Register the fragment-swap operator as the first mutator."""
        super().__init__(parser)
        self.mutators = [self.swap_fragment]

    def swap_fragment(self, seed):
        """Substitutes a random fragment with another with the same symbol.

        Returns the seed unchanged if it has no parse tree, or if the
        tree has no node below the root that could be swapped.
        """
        if seed.has_structure:
            n_nodes = self.count_nodes(seed.structure)
            if n_nodes < 2:
                # Robustness fix: random.randint(2, n_nodes) would raise
                # ValueError for a tree with fewer than two countable nodes.
                return seed
            # Choose a node index >= 2, i.e. excluding the root.
            self.to_swap = random.randint(2, n_nodes)
            new_structure = self.recursive_swap(seed.structure)
            new_seed = Seed(tree_to_string(new_structure))
            new_seed.has_structure = True
            new_seed.structure = new_structure
            return new_seed
        return seed
# Demo: structurally mutate a valid seed (uses the global `parser`
# constructed earlier).
valid_seed = Seed("<html><header><title>Hello</title></header><body>World<br/></body></html>")
lf_mutator = FragmentMutator(parser)
print(valid_seed)
lf_mutator.mutate(valid_seed)
```
As we can see, one fragment has been substituted by another.
We can use a similar recursive traversal to *remove* a random fragment.
```
class FragmentMutator(FragmentMutator):
    def recursive_delete(self, fragment):
        """Recursively locate the `self.to_delete`-th node and delete it."""
        symbol, children = fragment
        if not self.is_excluded(symbol):
            self.to_delete -= 1
            if self.to_delete == 0:
                # "Delete" by keeping the node but dropping all children.
                return symbol, []
            children = [self.recursive_delete(child) for child in children]
        return symbol, children
```
We should also define the corresponding structural deletion operator, as well.
```
class FragmentMutator(FragmentMutator):
    def __init__(self, parser):
        """Register the fragment-delete operator."""
        super().__init__(parser)
        self.mutators.append(self.delete_fragment)

    def delete_fragment(self, seed):
        """Deletes a random fragment.

        Returns the original seed if it has no parse tree, if there is
        no node below the root to delete, or if deletion would produce
        an empty input.
        """
        if seed.has_structure:
            n_nodes = self.count_nodes(seed.structure)
            if n_nodes < 2:
                # Robustness fix: random.randint(2, n_nodes) would raise
                # ValueError for a tree with fewer than two countable nodes.
                return seed
            # Choose a node index >= 2, i.e. excluding the root.
            self.to_delete = random.randint(2, n_nodes)
            new_structure = self.recursive_delete(seed.structure)
            new_seed = Seed(tree_to_string(new_structure))
            new_seed.has_structure = True
            new_seed.structure = new_structure
            # do not return an empty new_seed
            return new_seed if new_seed.data else seed
        return seed
```
***Summary***. We now have all ingredients for structure-aware fuzzing. Our mutator disassembles all seeds into fragments, which are then added to the fragment pool. Our mutator swaps random fragments in a given seed with fragments of the same type. And our mutator deletes random fragments in a given seed. This allows to maintain a high degree of validity for the generated inputs w.r.t. the given grammar.
***Try it***. Try adding other structural mutation operators. How would an *add-operator* know the position in a given seed file, where it is okay to add a fragment starting with a certain symbol?
### Fragment-Based Fuzzing
We can now define an input-structure-aware fuzzer as pioneered in LangFuzz. To implement LangFuzz, we modify our [blackbox mutational fuzzer](GreyboxFuzzer.ipynb#Blackbox-Mutation-based-Fuzzer) to stack up to four structural mutations.
```
class LangFuzzer(MutationFuzzer):
    def create_candidate(self):
        """Returns an input generated by fuzzing a seed in the population.

        Picks a seed via the power schedule and stacks one to four
        structural mutations on it.
        """
        candidate = self.schedule.choose(self.population)
        for _ in range(random.randint(1, 4)):
            candidate = self.mutator.mutate(candidate)
        return candidate
```
Okay, let's take our first input-structure aware fuzzer for a spin. Being careful, we set n=300 for now.
```
n = 300  # number of fuzzing trials; kept small because parsing is slow

# `my_parser` is the function under test, defined earlier in the chapter
# (not visible here).
runner = FunctionCoverageRunner(my_parser)
mutator = FragmentMutator(EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
schedule = PowerSchedule()
langFuzzer = LangFuzzer([valid_seed.data], mutator, schedule)
start = time.time()
langFuzzer.runs(runner, trials = n)
end = time.time()
"It took LangFuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)
```
We observe that structural mutation is *sooo very slow*. This is despite our time budget of 200ms for parsing. In contrast, our blackbox fuzzer alone can generate about 10k inputs per second!
```
# Baseline: the same campaign with byte-level mutation only (no grammar).
runner = FunctionCoverageRunner(my_parser)
mutator = Mutator()
schedule = PowerSchedule()
blackFuzzer = MutationFuzzer([valid_seed.data], mutator, schedule)
start = time.time()
blackFuzzer.runs(runner, trials = n)
end = time.time()
"It took a blackbox fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)
```
Indeed, our blackbox fuzzer is done in the blink of an eye.
***Try it***. We can deal with this overhead using [deferred parsing](https://arxiv.org/abs/1811.09447). Instead of wasting time in the beginning of the fuzzing campaign when a byte-level mutator would make efficient progress, deferred parsing suggests to invest time in structural mutation only later in the fuzzing campaign when it becomes viable.
```
# Record the blackbox fuzzer's statement coverage for later comparison.
blackbox_coverage = len(runner.coverage())
"During this fuzzing campaign, the blackbox fuzzer covered %d statements." % blackbox_coverage
```
Let's print some stats for our fuzzing campaigns. Since we'll need to print stats more often later, we should wrap this into a function. In order to measure coverage, we import the [population_coverage](Coverage.ipynb#Coverage-of-Basic-Fuzzing) function. It takes a set of inputs and a Python function, executes the inputs on that function and collects coverage information. Specifically, it returns a tuple `(all_coverage, cumulative_coverage)` where `all_coverage` is the set of statements covered by all inputs, and `cumulative_coverage` is the number of statements covered as the number of executed inputs increases. We are just interested in the latter to plot coverage over time.
```
from Coverage import population_coverage
def print_stats(fuzzer, parser):
    """Print how many of `fuzzer.inputs` can be parsed by `parser`, and
    the statement coverage achieved on the global `my_parser`.

    Reuses each seed's memoized `has_structure` flag where available;
    otherwise re-parses the input with a 200ms SIGALRM budget.
    """
    coverage, _ = population_coverage(fuzzer.inputs, my_parser)
    has_structure = 0
    for seed in fuzzer.inputs:
        # reuse memoized information
        if hasattr(seed, "has_structure"):
            if seed.has_structure:
                has_structure += 1
        else:
            # Inputs may be plain strings; wrap them for uniform access.
            if isinstance(seed, str):
                seed = Seed(seed)
            try:
                signal.setitimer(signal.ITIMER_REAL, 0.2)  # 200ms budget
                next(parser.parse(seed.data))  # counts as valid if this succeeds
                signal.setitimer(signal.ITIMER_REAL, 0)
                has_structure += 1
            except (SyntaxError, Timeout):
                signal.setitimer(signal.ITIMER_REAL, 0)  # ensure timer is off
    print("From the %d generated inputs, %d (%0.2f%%) can be parsed.\n"
          "In total, %d statements are covered." % (
              len(fuzzer.inputs),
              has_structure,
              100 * has_structure / len(fuzzer.inputs),
              len(coverage)))
```
For LangFuzzer, let's see how many of the inputs generated by LangFuzz are valid (i.e., parsable) and how many statements were covered.
```
# Validity and coverage stats for the fragment-based LangFuzzer run.
print_stats(langFuzzer, EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
```
What are the stats for the mutational fuzzer that uses only byte-level mutation (and no grammars)?
```
# Validity and coverage stats for the byte-level blackbox run.
print_stats(blackFuzzer, EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
```
***Summary***. Our fragment-level blackbox fuzzer (LangFuzzer) generates *more valid inputs* but achieves *less code coverage* than our byte-level blackbox fuzzer. So, there is some value in generating inputs that do not stick to the provided grammar.
### Integration with Greybox Fuzzing
In the following we integrate fragment-level blackbox fuzzing (LangFuzz-style) with [byte-level greybox fuzzing](GreyboxFuzzer.ipynb#Greybox-Mutation-based-Fuzzer) (AFL-style). The additional coverage-feedback might allow us to increase code coverage more quickly.
A [greybox fuzzer](GreyboxFuzzer.ipynb#Greybox-Mutation-based-Fuzzer) adds to the seed population all generated inputs which increase code coverage. Inputs are generated in two stages, stacking up to four structural mutations and up to 32 byte-level mutations.
```
class GreyboxGrammarFuzzer(GreyboxFuzzer):
    def __init__(self, seeds, byte_mutator, tree_mutator, schedule):
        """Greybox fuzzer stacking structural (tree-level) and byte-level
        mutation.  `byte_mutator` is passed to the base class as the
        regular mutator; `tree_mutator` operates on parse trees/regions."""
        super().__init__(seeds, byte_mutator, schedule)
        self.tree_mutator = tree_mutator

    def create_candidate(self):
        """Returns an input generated by structural mutation of a seed in the population"""
        seed = self.schedule.choose(self.population)

        # Structural mutation: stack up to four tree-level mutations
        # (possibly zero).
        trials = random.randint(0,4)
        for i in range(trials):
            seed = self.tree_mutator.mutate(seed)

        # Byte-level mutation: applied when no structural mutation happened,
        # when the seed could not be parsed, or with 50% probability.
        candidate = seed.data
        if trials == 0 or not seed.has_structure or 1 == random.randint(0, 1):
            # Up to 32 byte-level mutations, capped at the input length.
            dumb_trials = min(len(seed.data), 1 << random.randint(1,5))
            for i in range(dumb_trials):
                candidate = self.mutator.mutate(candidate)
        return candidate
```
Let's run our integrated fuzzer with the [standard byte-level mutator](GreyboxFuzzer.ipynb#Mutator-and-Seed) and our [fragment-based structural mutator](#Fragment-based-Mutation) that was introduced above.
```
# Greybox campaign combining byte-level and fragment-based tree mutation.
runner = FunctionCoverageRunner(my_parser)
byte_mutator = Mutator()
tree_mutator = FragmentMutator(EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
schedule = PowerSchedule()
gg_fuzzer = GreyboxGrammarFuzzer([valid_seed.data], byte_mutator, tree_mutator, schedule)
start = time.time()
gg_fuzzer.runs(runner, trials = n)
end = time.time()
"It took the greybox grammar fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)
print_stats(gg_fuzzer, EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
```
***Summary***. Our structural greybox fuzzer
* runs faster than the fragment-based LangFuzzer,
* achieves more coverage than both the fragment-based LangFuzzer and the vanilla blackbox mutational fuzzer, and
* generates fewer valid inputs than even the vanilla blackbox mutational fuzzer.
## Mutating Invalid Seeds
In the previous section, we have seen that most inputs that are added as seeds are *invalid* w.r.t. our given grammar. Yet, in order to apply our fragment-based mutators, we need it to parse the seed successfully. Otherwise, the entire fragment-based approach becomes useless. The question arises: *How can we derive structure from (invalid) seeds that cannot be parsed successfully?*
To this end, we introduce the idea of _region-based mutation_, first explored with the [AFLSmart](https://github.com/aflsmart/aflsmart) structural greybox fuzzer \cite{Pham2018aflsmart}. AFLSmart implements byte-level, fragment-based, and region-based mutation as well as validity-based power schedules. We define *region-based mutators*, where a *region* is a consecutive sequence of bytes in the input that can be associated with a symbol in the grammar.
### Determining Symbol Regions
The function `chart_parse` of the [Earley parser](Parser.ipynb#The-Parsing-Algorithm) produces a parse table for a string. For each letter in the string, this table gives the potential symbol and a *region* of neighboring letters that might belong to the same symbol.
```
# An invalid seed: the closing body tag is broken (">/body>" instead of
# "</body>"), so a full parse fails.
invalid_seed = Seed("<html><body><i>World</i><br/>>/body></html>")
parser = EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS)
# chart_parse yields the Earley parse table: one column per input letter.
table = parser.chart_parse(invalid_seed.data, parser.start_symbol())
for column in table:
    print(column)
    print("---")
```
The number of columns in this table that are associated with potential symbols corresponds to the number of letters that could be parsed successfully. In other words, we can use this table to compute the longest parsable substring.
```
# Columns with states correspond to letters that could be parsed, so the
# longest parsable prefix is everything covered by non-empty columns.
cols = [col for col in table if col.states]
parsable = invalid_seed.data[:len(cols)-1]
print("'%s'" % invalid_seed)
parsable
```
From this, we can compute the *degree of validity* for an input.
```
# Degree of validity: percentage of the input that parses successfully.
validity = 100 * len(parsable) / len(invalid_seed.data)
"%0.1f%% of the string can be parsed successfully." % validity
```
***Summary***. Unlike input fragments, input regions can be derived even if the parser fails to generate the entire parse tree.
### Region-based Mutation
To fuzz invalid seeds, the region-based mutator associates symbols from the grammar with regions (i.e., indexed substrings) in the seed. The [overridden](#Building-the-Fragment-Pool) method `add_to_fragment_pool()` first tries to mine the fragments from the seed. If this fails, the region mutator uses [Earley parser](Parser.ipynb#The-Parsing-Algorithm) to derive the parse table. For each column (i.e., letter), it extracts the symbols and corresponding regions. This allows the mutator to store the set of regions with each symbol.
```
class RegionMutator(FragmentMutator):
    def add_to_fragment_pool(self, seed):
        """Mark fragments and regions in a seed file.

        First attempts regular fragment mining (super).  If the seed
        cannot be parsed, falls back to the Earley parse table and
        records, for each grammar symbol, the set of (start, end) index
        regions of length >= 2 that the symbol derives.  Sets
        `seed.has_regions` accordingly (False when fragments were mined
        instead, or on timeout).
        """
        super().add_to_fragment_pool(seed)
        if not seed.has_structure:
            try:
                signal.setitimer(signal.ITIMER_REAL, 0.2) # set 200ms timeout
                seed.regions = {k: set() for k in self.parser.cgrammar}
                for column in self.parser.chart_parse(seed.data, self.parser.start_symbol()):
                    for state in column.states:
                        # Keep only completed states over non-excluded
                        # symbols spanning at least two characters.
                        if (not self.is_excluded(state.name) and
                                state.e_col.index - state.s_col.index > 1 and
                                state.finished()):
                            seed.regions[state.name].add((state.s_col.index, state.e_col.index))
                signal.setitimer(signal.ITIMER_REAL, 0) # cancel timeout
                seed.has_regions = True
            except Timeout:
                seed.has_regions = False
        else:
            seed.has_regions = False
```
This is how these regions look like for our invalid seed. A region consists of a start and end index in the seed string.
```
# Show the symbol regions computed for the invalid seed.
mutator = RegionMutator(parser)
mutator.add_to_fragment_pool(invalid_seed)
for symbol in invalid_seed.regions:
    print(symbol)
    for (s, e) in invalid_seed.regions[symbol]:
        print("|-(%d,%d) : %s" % (s, e, invalid_seed.data[s:e]))
```
Now that we know which regions in the seed belong to which symbol, we can define region-based swap and delete operators.
```
class RegionMutator(RegionMutator):
    def swap_fragment(self, seed):
        """Chooses a random region and swaps it with a fragment
        that starts with the same symbol"""
        if seed.has_structure or not seed.has_regions:
            # Fully parsed (or region-less) seeds use fragment-based swapping.
            return super().swap_fragment(seed)
        # Symbols that have both a region in this seed and a pool fragment.
        candidates = [symbol for symbol in seed.regions
                      if seed.regions[symbol] and self.fragments[symbol]]
        if not candidates:
            return seed
        symbol = random.choice(candidates)
        s, e = random.choice(list(seed.regions[symbol]))
        replacement = tree_to_string(random.choice(self.fragments[symbol]))
        new_seed = Seed(seed.data[:s] + replacement + seed.data[e:])
        new_seed.has_structure = False
        new_seed.has_regions = False
        return new_seed
class RegionMutator(RegionMutator):
    def delete_fragment(self, seed):
        """Deletes a random region.

        Only regions of length >= 2 are deleted.  Note that
        `add_to_fragment_pool()` records only regions with
        end - start > 1, so the re-draw loop below terminates --
        presumably on the first iteration; the loop is defensive.
        """
        if not seed.has_structure and seed.has_regions:
            # Symbols for which this seed actually has regions.
            regions = [r for r in seed.regions
                       if len(seed.regions[r]) > 0]
            if len(regions) == 0: return seed
            key = random.choice(list(regions))
            s, e = (0, 0)
            while (e - s < 2):  # re-draw until the region is long enough
                s, e = random.choice(list(seed.regions[key]))
            new_seed = Seed(seed.data[:s] + seed.data[e:])
            new_seed.has_structure = False
            new_seed.has_regions = False
            return new_seed
        else:
            # Parsed seeds fall back to fragment-based deletion.
            return super().delete_fragment(seed)
```
Let's try our new region-based mutator. We add a simple, valid seed to the fragment pool and attempt to mutate the invalid seed.
```
simple_seed = Seed("<b>Text</b>")
mutator = RegionMutator(parser)
mutator.add_to_fragment_pool(simple_seed)
print(invalid_seed)
mutator.mutate(invalid_seed)
```
***Summary***. We can use the Earley parser to generate a parse table and assign regions in the input to symbols in the grammar. Our region mutators can substitute these region with fragments from the fragment pool that start with the same symbol, or delete these regions entirely.
***Try it***. Implement a region pool (similar to the fragment pool) and a `swap_region()` mutator.
You can execute your own code by opening this chapter as Jupyter notebook.
### Region-Based Fuzzing
Let's try our shiny new region mutator by integrating it with our [structure-aware greybox fuzzer](#Integration-with-Greybox-Fuzzing).
```
runner = FunctionCoverageRunner(my_parser)
byte_mutator = Mutator()
tree_mutator = RegionMutator(EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
schedule = PowerSchedule()
regionFuzzer = GreyboxGrammarFuzzer([valid_seed.data], byte_mutator, tree_mutator, schedule)
start = time.time()
regionFuzzer.runs(runner, trials = n)
end = time.time()
"It took the structural greybox fuzzer with region mutator\
%0.2f seconds to generate and execute %d inputs." % (end - start, n)
```
We can see that the structural greybox fuzzer with region-based mutator is slower than the [fragment-based mutator alone](#Fragment-based-Fuzzing). This is because region-based structural mutation is applicable for *all seeds*. In contrast, fragment-based mutators were applicable only for tiny number of parsable seeds. Otherwise, only (very efficient) byte-level mutators were applied.
Let's also print the average degree of validity for the seeds in the population.
```
def print_more_stats(fuzzer, parser):
    """Print the standard fuzzer stats plus the average degree of
    validity (parsable fraction) across the seed population."""
    print_stats(fuzzer, parser)
    validity = 0
    total = 0
    for seed in fuzzer.population:
        if not seed.data:
            continue  # skip empty seeds (no meaningful validity)
        table = parser.chart_parse(seed.data, parser.start_symbol())
        cols = [col for col in table if col.states]
        # BUG FIX: measure the parsable prefix of *this* seed --
        # the original sliced the global `invalid_seed` by mistake.
        parsable = seed.data[:len(cols) - 1]
        validity += len(parsable) / len(seed.data)
        total += 1
    # Guard against an all-empty population (avoid ZeroDivisionError).
    average = 100 * validity / total if total else 0
    print("On average, %0.1f%% of a seed in the population can be successfully parsed." % average)
# Stats (including average validity) for the region-based campaign.
print_more_stats(regionFuzzer, parser)
```
***Summary***. Compared to fragment-based mutation, a greybox fuzzer with region-based mutation achieves *higher coverage* but generates a *smaller number of valid inputs*. The higher coverage is explained by leveraging at least *some* structure for seeds that cannot be parsed successfully.
## Focusing on Valid Seeds
In the previous section, we have a problem: The low (degree of) validity. To address this problem, a _validity-based power schedule_ assigns more [energy](GreyboxFuzzer.ipynb#Power-Schedules) to seeds that have a higher degree of validity. In other words, the fuzzer _spends more time fuzzing seeds that are more valid_.
```
import math
class AFLSmartSchedule(PowerSchedule):
    """Validity-based power schedule: seeds with a higher degree of
    validity receive (exponentially) more energy."""

    def __init__(self, parser, exponent):
        super().__init__()  # initialize the base schedule
        self.parser = parser
        self.exponent = exponent

    def parsable(self, seed):
        """Returns the substring that is parsable"""
        # BUG FIX: use the schedule's own parser; the original referenced
        # the *global* `parser` for the start symbol.
        table = self.parser.chart_parse(seed.data, self.parser.start_symbol())
        cols = [col for col in table if col.states]
        return seed.data[:len(cols) - 1]

    def degree_of_validity(self, seed):
        """Returns the proportion of a seed that is parsable (memoized
        on the seed as `seed.validity`)."""
        if hasattr(seed, "validity"):
            return seed.validity
        seed.validity = (len(self.parsable(seed)) / len(seed.data)
                         if len(seed.data) > 0 else 0)
        return seed.validity

    def assignEnergy(self, population):
        """Assign exponential energy proportional to degree of validity"""
        for seed in population:
            # log(len) normalizes for seed length; seeds of length <= 1
            # get no energy (log(1) == 0 would divide by zero).
            seed.energy = ((self.degree_of_validity(seed) / math.log(len(seed.data))) ** self.exponent
                           if len(seed.data) > 1 else 0)
```
Let's play with the degree of validity by passing in a valid seed ...
```
# Degree of validity for a fully valid seed (should be close to 100%).
smart_schedule = AFLSmartSchedule(parser, 1)
print("%11s: %s" % ("Entire seed", simple_seed))
print("%11s: %s" % ("Parsable", smart_schedule.parsable(simple_seed)))
"Degree of validity: %0.2f%%" % (100 * smart_schedule.degree_of_validity(simple_seed))
```
... and an invalid seed.
```
print("%11s: %s" % ("Entire seed", invalid_seed))
print("%11s: %s" % ("Parsable", smart_schedule.parsable(invalid_seed)))
"Degree of validity: %0.2f%%" % (100 * smart_schedule.degree_of_validity(invalid_seed))
```
Excellent. We can compute the degree of validity as the proportion of the string that can be parsed.
Let's plug the validity-based power schedule into the structure-aware greybox fuzzer.
```
# AFLSmart-style campaign: validity-based power schedule + region mutator.
runner = FunctionCoverageRunner(my_parser)
byte_mutator = Mutator()
tree_mutator = RegionMutator(EarleyParser(XML_GRAMMAR, tokens=XML_TOKENS))
schedule = AFLSmartSchedule(parser, 1)
aflsmart = GreyboxGrammarFuzzer([valid_seed.data], byte_mutator, tree_mutator, schedule)
start = time.time()
aflsmart.runs(runner, trials = n)
end = time.time()
"It took AFLSmart %0.2f seconds to generate and execute %d inputs." % (end - start, n)
print_more_stats(aflsmart, parser)
```
***Summary***. Indeed, by spending more time fuzzing seeds with a higher degree of validity, we also generate inputs with a higher degree of validity. More inputs are entirely valid w.r.t. the given grammar.
***Read up***. Learn more about region-based fuzzing, deferred parsing, and validity-based schedules in the original AFLSmart paper: "[Smart Greybox Fuzzing](https://arxiv.org/abs/1811.09447)" by Pham et al.. Download and improve AFLSmart: [https://github.com/aflsmart/aflsmart](https://github.com/aflsmart/aflsmart).
## Mining Seeds
By now, it should have become clear that the _choice of seeds_ can very much influence the success of fuzzing. One aspect is _variability_ – our seeds should cover as many different features as possible in order to increase coverage. Another aspect, however, is the _likelihood of a seed to induce errors_ – that is, if a seed was involved in causing a failure before, then a mutation of this very seed may be likely to induce failures again. This is because fixes for past failures typically are successful in letting the concrete failure no longer occur, but sometimes may fail to capture all conditions under which a failure may occur. Hence, even if the original failure is fixed, the likelihood of an error in the _surroundings_ of the original failure-inducing input is still higher. It thus pays off to use as seeds _inputs that are known to have caused failures before_.
To put things in context, Holler's _LangFuzz_ fuzzer used as seeds JavaScript inputs from CVE reports. These were published as failure-inducing inputs at a time when the error already had been fixed; thus they could do no harm anymore. Yet, by using such inputs as seeds, LangFuzz would create plenty of mutations and recombinations of all their features, many of which would (and do) find errors again and again.
## Lessons Learned
* A **dictionary** is useful to inject important keywords into the generated inputs.
* **Fragment-based mutation** first disassembles seeds into fragments, and reassembles these fragments to generate new inputs. A *fragment* is a subtree in the seed's parse tree. However, fragment-based mutation requires that the seeds can be parsed successfully, which may not be true for seeds discovered by a coverage-based greybox fuzzer.
* **Region-based mutation** marks regions in the input as belonging to a certain symbol in the grammar. For instance, it may identify a substring '</a>' as closing tag. These regions can then be deleted or substituted by fragments or regions belonging to the same symbol. Unlike fragment-based mutation, region-based mutation is applicable to *all* seeds - even those that can be parsed only partially. However, the degree of validity is still quite low for the generated inputs.
* A **validity-based power schedule** invests more energy into seeds with a higher degree of validity. The inputs that are generated also have a higher degree of validity.
* **Mining seeds** from repositories of previous failure-inducing inputs results in input fragments associated with past failures, raising the likelihood to find more failures in the vicinity.
## Next Steps
This chapter closes our discussion of syntactic fuzzing techniques.
* In the [next chapter](Reducer.ipynb), we discuss how to _reduce failure-inducing inputs_ after a failure, keeping only those portions of the input that are necessary for reproducing the failure.
* The [next part](04_Semantical_Fuzzing.ipynb) will go from syntactical to _semantical_ fuzzing, considering code semantics for targeted test generation.
## Background
This chapter builds on the following two works:
* The _LangFuzz_ fuzzer \cite{Holler2012} is an efficient (and effective!) grammar-based fuzzer for (mostly) JavaScript. It uses the grammar for parsing seeds and recombining their inputs with generated parts and found 2,600 bugs in JavaScript interpreters to date.
* Smart greybox fuzzing ([AFLSmart](https://github.com/aflsmart/aflsmart)) brings together coverage-based fuzzing and grammar-based (structural) fuzzing, as described in \cite{Pham2018aflsmart}. The resulting AFLSMART tool has discovered 42 zero-day vulnerabilities in widely-used, well-tested tools and libraries; so far 17 CVEs were assigned.
Recent fuzzing work also brings together grammar-based fuzzing and coverage.
* _Superion_ \cite{Wang2019superion} is equivalent to our section "Integration with Greybox Fuzzing", as above – that is, a combination of LangFuzz and Greybox Fuzzing, but no AFL-style byte-level mutation. Superion can improve the code coverage (i.e., 16.7% and 8.8% in line and function coverage) and bug-finding capability over AFL and jsfunfuzz. According to the authors, they found 30 new bugs, among which they discovered 21 new vulnerabilities with 16 CVEs assigned and 3.2K USD bug bounty rewards received.
* _Nautilus_ \cite{Aschermann2019nautilus} also combines grammar-based fuzzing with coverage feedback. It maintains the parse tree for all seeds and generated inputs. To allow AFL-style byte-level mutations, it "collapses" subtrees back to byte-level representations. This has the advantage of not having to re-parse generated seeds; however, over time, Nautilus de-generates to structure-unaware greybox fuzzing because it does not re-parse collapsed subtrees to reconstitute input structure for later seeds where most of the parse tree is collapsed. Nautilus identified bugs in mruby, PHP, ChakraCore, and in Lua; reporting these bugs was awarded with a sum of 2600 USD and 6 CVEs were assigned.
## Exercises
### Exercise 1: The Big Greybox Fuzzer Shoot-Out
Use our implementations of greybox techniques and evaluate them on a benchmark. Which technique (and which sub-technique) has which impact and why? Also take into account the specific approaches of Superion \cite{Wang2019superion} and Nautilus \cite{Aschermann2019nautilus}, possibly even on the benchmarks used by these approaches.
**Solution.** To be added by Summer 2019.
| github_jupyter |
# Fundamentals of image processing using scikit image
NumPy is the fundamental package for scientific computing with Python. It contains among other things:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
https://numpy.org/
Scikit-image is a collection of algorithms for image processing. It contains:
- algorithms for image filtering, registration, and segmentation amongst others
- great tutorials and examples gallery
https://scikit-image.org
Before we can do image processing with `scikit-image`, we need to understand how images are represented. For this, we will use NumPy and material from their introductory tutorial here - https://numpy.org/devdocs/user/quickstart.html
# Introduction to NumPy
NumPy’s main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy dimensions are called axes.
For example, the coordinates of a point in 3D space [1, 2, 1] has one axis. That axis has 3 elements in it, so we say it has a length of 3.
In the example pictured below, the array has 2 axes. The first axis has a length of 2, the second axis has a length of 3.
```
[[ 1., 0., 0.],
[ 0., 1., 2.]]
```
NumPy’s array class is called `ndarray`. The more important attributes of an ndarray object are:
- `ndarray.ndim`
the number of axes (dimensions) of the array.
- `ndarray.shape`
the dimensions of the array. This is a tuple of integers indicating the size of the array in each dimension. For a matrix with n rows and m columns, shape will be (n,m). The length of the shape tuple is therefore the number of axes, ndim.
- `ndarray.size`
the total number of elements of the array. This is equal to the product of the elements of shape.
- `ndarray.dtype`
an object describing the type of the elements in the array. One can create or specify dtype’s using standard Python types. Additionally NumPy provides types of its own. numpy.int32, numpy.int16, and numpy.float64 are some examples.
## An example with a random array
First we import NumPy and then we can create a random 2D array and inspect some of its propteries
```
import numpy as np
a = np.random.random((3, 5))
print(a)
print(a.ndim)
print(a.shape)
print(a.dtype.name)
print(type(a))
```
## Array creation
There are several ways to create arrays.
For example, you can create an array from a regular Python list or tuple using the array function. The type of the resulting array is deduced from the type of the elements in the sequences.
```
a = np.array([2,3,4])
print(a)
print(a.dtype)
b = np.array([1.2, 3.5, 5.1])
print(b)
print(b.dtype)
```
A frequent error consists in calling array with multiple numeric arguments, rather than providing a single list of numbers as an argument.
```
np.array(1,2,3,4) # ERROR
np.array([1,2,3,4]) # RIGHT
```
array transforms sequences of sequences into two-dimensional arrays, sequences of sequences of sequences into three-dimensional arrays, and so on.
```
b = np.array([(1.5,2,3), (4,5,6)])
print(b)
print(b.ndim)
```
The function zeros creates an array full of zeros, the function ones creates an array full of ones, and the function empty creates an array whose initial content is random and depends on the state of the memory. By default, the dtype of the created array is float64.
```
a = np.zeros((3, 4))
print(a)
print(a.ndim)
print(a.shape)
print(a.dtype.name)
a = np.ones((2,3,4), dtype=np.int16) # dtype can also be specified
print(a)
print(a.ndim)
print(a.shape)
print(a.dtype.name)
```
To create sequences of numbers, NumPy provides a function analogous to range that returns arrays instead of lists.
```
np.arange(10, 30, 5)
```
It is often better to use the function linspace that receives as an argument the number of elements that we want, instead of the step
```
np.linspace(0, 2, 9) # 9 numbers from 0 to 2
```
## Basic Operations
Arithmetic operators on arrays apply *elementwise*. A new array is created and filled with the result.
```
a = np.array([20, 30, 40, 50])
b = np.arange(4)
print('a is:', a)
print('b is:', b)
c = a-b
print('c is:', c)
b**2
10*np.sin(a)
a<35
```
Unlike in many matrix languages, the product operator * operates elementwise in NumPy arrays. The matrix product can be performed using the @ operator or the dot function or method:
```
A = np.array([[1, 1],
[0, 1]])
B = np.array([[2, 0],
[3, 4]])
A * B # elementwise product
A @ B # matrix product
A.dot(B) # another matrix product
```
Some operations, such as += and *=, act in place to modify an existing array rather than create a new one.
```
a = np.ones((2,3), dtype=int)
a
a *= 3
a
b = np.random.random((2,3))
b
b += a
b
```
When operating with arrays of different types, the type of the resulting array corresponds to the more general or precise one (a behavior known as upcasting).
```
a += b # ERROR - b is not automatically converted to integer type
```
Many unary operations, such as computing the sum of all the elements in the array, are implemented as methods of the ndarray class.
```
a = np.random.random((2,3))
a
a.sum()
a.min()
a.max()
```
By default, these operations apply to the array as though it were a list of numbers, regardless of its shape. However, by specifying the axis parameter you can apply an operation along the specified axis of an array:
```
b = np.arange(12).reshape(3,4)
b
b.sum(axis=0) # sum of each column
b.min(axis=1) # min of each row
b.cumsum(axis=1) # cumulative sum along each row
```
## Universal Functions
NumPy provides familiar mathematical functions such as sin, cos, and exp. In NumPy, these are called “universal functions”(ufunc). Within NumPy, these functions operate elementwise on an array, producing an array as output.
```
B = np.arange(3)
B
np.exp(B)
np.sqrt(B)
C = np.array([2., -1., 4.])
np.add(B, C)
```
## Indexing, Slicing and Iterating
One-dimensional arrays can be indexed, sliced and iterated over, much like lists and other Python sequences.
```
a = np.arange(10)**3
a
a[2]
a[2:5]
a[:6:2] = -1000 #from start to position 6, exclusive, set every 2nd element to -1000
a
a[ : :-1] # reversed a
for i in a:
print(i / 3)
```
Multidimensional arrays can have one index per axis. These indices are given in a tuple separated by commas:
```
def f(x, y):
    """Combine the two index grids into a single value (10*x + y)."""
    return 10 * x + y
b = np.fromfunction(f,(5,4),dtype=int)
b
b[2,3]
b[0:5, 1] # each row in the second column of b
b[ : ,1] # equivalent to the previous example
b[1:3, : ] # each column in the second and third row of b
```
When fewer indices are provided than the number of axes, the missing indices are considered complete slices:
```
b[-1] # the last row. Equivalent to b[-1,:]
```
The expression within brackets in `b[i]` is treated as an i followed by as many instances of : as needed to represent the remaining axes. NumPy also allows you to write this using dots as `b[i,...]`.
The dots (...) represent as many colons as needed to produce a complete indexing tuple. For example, if x is an array with 5 axes, then
- `x[1,2,...]` is equivalent to `x[1,2,:,:,:]`,
- `x[...,3]` to `x[:,:,:,:,3]` and
- `x[4,...,5,:]` to `x[4,:,:,5,:]`.
```
c = np.array([[[ 0, 1, 2], # a 3D array (two stacked 2D arrays)
[ 10, 12, 13]],
[[100,101,102],
[110,112,113]]])
c.shape
c[1,...] # same as c[1,:,:] or c[1]
c[...,2] # same as c[:,:,2]
```
Iterating over multidimensional arrays is done with respect to the first axis:
```
b
for row in b:
print(row)
for element in b.flat:
print(element)
```
## Shape Manipulation
### Changing the shape of an array
An array has a shape given by the number of elements along each axis:
```
a = np.floor(10*np.random.random((3,4)))
a
a.shape
```
The shape of an array can be changed with various commands. Note that the following three commands all return a modified array, but do not change the original array:
```
a.ravel() # returns the array, flattened
a.reshape(6,2) # returns the array with a modified shape
a.T # returns the array, transposed
a.T.shape
```
### Stacking together different arrays
Several arrays can be stacked together along different axes:
```
a = np.floor(10*np.random.random((2,2)))
a
b = np.floor(10*np.random.random((2,2)))
b
np.vstack((a, b))
np.hstack((a, b))
c = np.floor(10*np.random.random((2,2)))
c
d = np.stack([a, b, c], axis=2)
d
d.shape
```
# Images are just numpy arrays
Images are represented in ``scikit-image`` using standard ``numpy`` arrays. This allows maximum inter-operability with other libraries in the scientific Python ecosystem, such as ``matplotlib`` and ``scipy``.
Let's see how to build a grayscale image as a 2D array:
```
import numpy as np
from matplotlib import pyplot as plt
```
Make sure our plots appear inline
```
%matplotlib inline
random_image = np.random.random([500, 500])
plt.imshow(random_image, cmap='gray')
plt.colorbar();
```
The same holds for "real-world" images:
```
from skimage import data
coins = data.coins()
print('Type:', type(coins))
print('dtype:', coins.dtype)
print('shape:', coins.shape)
plt.imshow(coins, cmap='gray');
```
A color image is a 3D array, where the last dimension has size 3 and represents the red, green, and blue channels:
```
astro = data.astronaut()
print("Shape:", astro.shape)
print("Values min/max:", astro.min(), astro.max())
plt.imshow(astro);
```
These are *just NumPy arrays*. E.g., we can make a red square by using standard array slicing and manipulation:
```
astro[10:110, 10:110, :] = [255, 0, 0] # [red, green, blue]
plt.imshow(astro);
```
Images can also include transparent regions by adding a 4th dimension, called an *alpha layer*.
### Other shapes, and their meanings
|Image type|Coordinates|
|:---|:---|
|2D grayscale|(row, column)|
|2D multichannel|(row, column, channel)|
|3D grayscale (or volumetric) |(plane, row, column)|
|3D multichannel|(plane, row, column, channel)|
## Displaying images using matplotlib
```
from skimage import data
img0 = data.chelsea()
img1 = data.rocket()
import matplotlib.pyplot as plt
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(20, 10))
ax0.imshow(img0)
ax0.set_title('Cat', fontsize=18)
ax0.scatter([10, 10, 10, 10], [10, 20, 30, 40], color='white')
ax0.axis('off')
ax1.imshow(img1)
ax1.set_title('Rocket', fontsize=18)
ax1.set_xlabel(r'Launching position $\alpha=320$')
ax1.vlines([202, 300], 0, img1.shape[0], colors='magenta',
linewidth=3, label='Side tower position')
ax1.plot([168, 190, 200], [400, 200, 300], color='white',
linestyle='--', marker='o', label='Side angle')
ax1.legend();
```
For more on plotting, see the [Matplotlib documentation](https://matplotlib.org/gallery/index.html#images-contours-and-fields) and [pyplot API](https://matplotlib.org/api/pyplot_summary.html).
## Data types and image values
In literature, one finds different conventions for representing image values:
```
0 - 255 where 0 is black, 255 is white
0 - 1 where 0 is black, 1 is white
```
``scikit-image`` supports both conventions--the choice is determined by the
data-type of the array.
E.g., here, I generate two valid images:
```
linear0 = np.linspace(0, 1, 2500).reshape((50, 50))
linear1 = np.linspace(0, 255, 2500).reshape((50, 50)).astype(np.uint8)
print("Linear0:", linear0.dtype, linear0.min(), linear0.max())
print("Linear1:", linear1.dtype, linear1.min(), linear1.max())
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 15))
ax0.imshow(linear0, cmap='gray')
ax1.imshow(linear1, cmap='gray');
```
The library is designed in such a way that any data-type is allowed as input,
as long as the range is correct (0-1 for floating point images, 0-255 for unsigned bytes,
0-65535 for unsigned 16-bit integers).
You can convert images between different representations by using ``img_as_float``, ``img_as_ubyte``, etc.:
```
from skimage import img_as_float, img_as_ubyte
image = data.chelsea()
image_ubyte = img_as_ubyte(image)
image_float = img_as_float(image)
print("type, min, max:", image_ubyte.dtype, image_ubyte.min(), image_ubyte.max())
print("type, min, max:", image_float.dtype, image_float.min(), image_float.max())
print()
print("231/255 =", 231/255.)
```
Your code would then typically look like this:
```python
def my_function(any_image):
float_image = img_as_float(any_image)
# Proceed, knowing image is in [0, 1]
```
We recommend using the floating point representation, given that
``scikit-image`` mostly uses that format internally.
## Image I/O
Mostly, we won't be using input images from the scikit-image example data sets. Those images are typically stored in JPEG or PNG format. Since scikit-image operates on NumPy arrays, *any* image reader library that provides arrays will do. Options include imageio, matplotlib, pillow, etc.
scikit-image conveniently wraps many of these in the `io` submodule, and will use whichever of the libraries mentioned above are installed:
```
from skimage import io
image = io.imread('../data/balloon.jpg')
print(type(image))
print(image.dtype)
print(image.shape)
print(image.min(), image.max())
plt.imshow(image);
```
We also have the ability to load multiple images, or multi-layer TIFF images.
```
cells = io.imread('../data/cells.tif')
print('"cells" shape: {}'.format(cells.shape))
print('"cells" type: {}'.format(cells.dtype))
print('"cells" range: {}, {}'.format(cells.min(), cells.max()))
```
We see that `cells` has 60 planes, each with 256 rows and 256 columns. We can visualize one of the 2D planes with:
```
plt.imshow(cells[32], cmap='gray');
```
# Exercises
## <span class="exercize">Exercise: visualizing RGB channels</span>
Display the different color channels of the image along (each as a gray-scale image). Start with the following template:
```
# --- read in the image ---
image = plt.imread('..data/Bells-Beach.jpg')
# --- assign each color channel to a different variable ---
r = ...
g = ...
b = ...
# --- display the image and r, g, b channels ---
f, axes = plt.subplots(1, 4, figsize=(16, 5))
for ax in axes:
ax.axis('off')
(ax_r, ax_g, ax_b, ax_color) = axes
ax_r.imshow(r, cmap='gray')
ax_r.set_title('red channel')
ax_g.imshow(g, cmap='gray')
ax_g.set_title('green channel')
ax_b.imshow(b, cmap='gray')
ax_b.set_title('blue channel')
# --- Here, we stack the R, G, and B layers again
# to form a color image ---
ax_color.imshow(np.stack([r, g, b], axis=2))
ax_color.set_title('all channels');
```
Now, take a look at the following R, G, and B channels. How would their combination look? (Write some code to confirm your intuition.)
```
from skimage import draw
red = np.zeros((300, 300))
green = np.zeros((300, 300))
blue = np.zeros((300, 300))
r, c = draw.circle(100, 100, 100)
red[r, c] = 1
r, c = draw.circle(100, 200, 100)
green[r, c] = 1
r, c = draw.circle(200, 150, 100)
blue[r, c] = 1
f, axes = plt.subplots(1, 3)
for (ax, channel) in zip(axes, [red, green, blue]):
ax.imshow(channel, cmap='gray')
ax.axis('off')
# Hint: np.stack([...], axis=2)
```
## Exercise: Convert to grayscale ("black and white")
The *relative luminance* of an image is the intensity of light coming from each point. Different colors contribute differently to the luminance: it's very hard to have a bright, pure blue, for example. So, starting from an RGB image, the luminance is given by:
$$
Y = 0.2126R + 0.7152G + 0.0722B
$$
Use Python 3.5's matrix multiplication, `@`, to convert an RGB image to a grayscale luminance image according to the formula above.
Compare your results to that obtained with `skimage.color.rgb2gray`.
Change the coefficients to 1/3 (i.e., take the mean of the red, green, and blue channels, to see how that approach compares with `rgb2gray`).
```
from skimage import color, img_as_float
image = img_as_float(io.imread('../data/balloon.jpg'))
gray = color.rgb2gray(image)
my_gray = ...
# --- display the results ---
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 6))
ax0.imshow(gray, cmap='gray')
ax0.set_title('skimage.color.rgb2gray')
ax1.imshow(my_gray, cmap='gray')
ax1.set_title('my rgb2gray')
```
## <span class="exercize">Exercise: draw the letter H</span>
Define a function that takes as input an RGB image and a pair of coordinates (row, column), and returns a copy with a green letter H overlaid at those coordinates. The coordinates point to the top-left corner of the H.
The arms and strut of the H should have a width of 5 pixels, and the H itself should have a height of 36 pixels and width of 30 pixels.
Start with the following template:
```
def draw_H(image, coords, color=(0, 255, 0)):
out = image.copy()
...
return out
```
Test your function like so:
```
cat = data.chelsea()
cat_H = draw_H(cat, (50, -50))
plt.imshow(cat_H);
```
| github_jupyter |
# Generador de pokemons
Este script simulará la aparición de pokemons de una manera pseudoaleatoria. Los pokemons se pueden repetir, aparecerán aproximadamente cada `MEAN_INTERVAL` segundos, y aparecerán en el entorno de las coordenadas definidas en `COORDINATES`.
```
import pandas as pd
from confluent_kafka import Producer
import time
import json
import random
from ejercicios.pokemons import SEED, TOPIC_POKEMONS, DATA, COORDINATES, MEAN_INTERVAL
random.seed(SEED)
ds = pd.read_csv(DATA)
ds = ds.drop(labels=['#', 'Type 2', 'Total', 'Sp. Atk', 'Sp. Def', 'Generation'], axis=1)
ds = ds.rename({'Type 1': 'Type'}, axis=1)
ds.head()
pokemon = ds.sample(n=1)
as_dict = pokemon.to_dict('records')[0]
pokemon
as_dict
GAUSS_LAT_MADRID = COORDINATES['GAUSS_LAT_MADRID']
GAUSS_LON_MADRID = COORDINATES['GAUSS_LON_MADRID']
GAUSS_LAT_SEGOVIA = COORDINATES['GAUSS_LAT_SEGOVIA']
GAUSS_LON_SEGOVIA = COORDINATES['GAUSS_LON_SEGOVIA']
as_dict['lat'] = random.gauss(**GAUSS_LAT_MADRID)
as_dict['lon'] = random.gauss(**GAUSS_LON_MADRID)
as_dict
json.dumps(as_dict)
TOPIC_POKEMONS
p = Producer({'bootstrap.servers': 'localhost:9092'})
def delivery_report(err, msg):
    """Per-message delivery callback for confluent_kafka.Producer.

    Triggered from poll()/flush(); reports failures on stdout and stays
    silent on success.
    """
    if err is None:
        # Delivered fine - keep quiet to avoid flooding the console.
        # print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
        return
    print('Message delivery failed: {}'.format(err))
# Main publish loop: forever sample a pokemon, give it random coordinates
# near Madrid or Segovia (50/50 split), and send it to Kafka as JSON.
while True:
    pokemon = ds.sample(n=1)
    pokemon = pokemon.to_dict('records')[0]
    # Coin flip: half of the sightings near Madrid, half near Segovia.
    if random.randrange(0, 10, 1) < 5:
        pokemon['lat'] = random.gauss(**GAUSS_LAT_MADRID)
        pokemon['lon'] = random.gauss(**GAUSS_LON_MADRID)
    else:
        pokemon['lat'] = random.gauss(**GAUSS_LAT_SEGOVIA)
        pokemon['lon'] = random.gauss(**GAUSS_LON_SEGOVIA)
    # execute any available delivery report callbacks from previous produce() calls
    p.poll(0)
    # Asynchronously produce a message, the delivery report callback
    # will be triggered from poll() above, or flush() below, when the message has
    # been successfully delivered or failed permanently.
    print("Sending pokemon {Name}:{Type}:{lat}:{lon}".format(**pokemon))
    p.produce(
        TOPIC_POKEMONS,
        json.dumps(pokemon).encode('utf-8'),
        callback=delivery_report
    )
    # Sleep ~MEAN_INTERVAL seconds (gaussian jitter, clamped positive via abs).
    time.sleep(abs(random.gauss(MEAN_INTERVAL, 1)))

# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
# NOTE(review): unreachable - the `while True` loop above never breaks.
p.flush()
```
| github_jupyter |
```
import torch
from torch.autograd import grad
import torch.nn as nn
from numpy import genfromtxt
import torch.optim as optim
import matplotlib.pyplot as plt
import torch.nn.functional as F
# Ground-truth trajectories; rows are [t, S, I, D, R].
sidr_data = genfromtxt('covid_100_pts.csv', delimiter=',') #in the form of [t,S,I,D,R]
torch.manual_seed(1234)
%%time
# Checkpoint file prefix; DINN appends self.save (2 or 3) + '.pt' so two
# alternating checkpoint files exist.
PATH = 'covid_lr5_step_100_'
class DINN(nn.Module):
    """Disease-Informed Neural Network for the SIDR covid model.

    Fits a small MLP mapping t -> (S, I, D, R) to the data while
    simultaneously learning the epidemic parameters alpha/beta/gamma by
    penalising the residuals of the SIDR ODE system (physics-informed loss).
    NOTE(review): the caller must attach `optimizer` and `scheduler`
    attributes before calling load()/train().
    """

    def __init__(self, t, S_data, I_data, D_data, R_data): #[t,S,I,D,R]
        super(DINN, self).__init__()
        self.N = 59e6 #population size
        # Time grid; requires_grad so d/dt of the network output can be
        # obtained via autograd in net_f().
        self.t = torch.tensor(t, requires_grad=True)
        self.t_float = self.t.float()
        self.t_batch = torch.reshape(self.t_float, (len(self.t),1)) #reshape for batch
        # Ground-truth compartment trajectories.
        self.S = torch.tensor(S_data)
        self.I = torch.tensor(I_data)
        self.D = torch.tensor(D_data)
        self.R = torch.tensor(R_data)
        self.losses = []
        self.save = 3 #which file to save to (alternates between 2 and 3)
        # Unconstrained learnable parameters; squashed through tanh in the
        # properties below. Trailing comments give the ground-truth targets.
        self.alpha_tilda = torch.nn.Parameter(torch.rand(1, requires_grad=True)) #0.191
        self.beta_tilda = torch.nn.Parameter(torch.rand(1, requires_grad=True)) #0.05
        self.gamma_tilda = torch.nn.Parameter(torch.rand(1, requires_grad=True)) #0.0294
        #find values for normalization
        self.S_max = max(self.S)
        self.I_max = max(self.I)
        self.D_max = max(self.D)
        self.R_max = max(self.R)
        self.S_min = min(self.S)
        self.I_min = min(self.I)
        self.D_min = min(self.D)
        self.R_min = min(self.R)
        # Min-max normalise the data to [0, 1] (the original comment said
        # "unnormalize", but this is the forward normalisation).
        self.S_hat = (self.S - self.S_min) / (self.S_max - self.S_min)
        self.I_hat = (self.I - self.I_min) / (self.I_max - self.I_min)
        self.D_hat = (self.D - self.D_min) / (self.D_max - self.D_min)
        self.R_hat = (self.R - self.R_min) / (self.R_max - self.R_min)
        # One-hot gradient-selector matrices (x4 for S,I,D,R): calling
        # backward(m_k) extracts d(output_k)/dt for one compartment at a time.
        self.m1 = torch.zeros((len(self.t), 4)); self.m1[:, 0] = 1
        self.m2 = torch.zeros((len(self.t), 4)); self.m2[:, 1] = 1
        self.m3 = torch.zeros((len(self.t), 4)); self.m3[:, 2] = 1
        self.m4 = torch.zeros((len(self.t), 4)); self.m4[:, 3] = 1
        #NN
        self.net_sidr = self.Net_sidr()
        # Optimise both the network weights and the three epidemic parameters.
        self.params = list(self.net_sidr.parameters())
        self.params.extend(list([self.alpha_tilda, self.beta_tilda, self.gamma_tilda]))

    #force parameters to be in a range
    @property
    def alpha(self):
        # tanh bounds the raw parameter; the scale is 20x the target value.
        return torch.tanh(self.alpha_tilda)*0.191*20

    @property
    def beta(self):
        return torch.tanh(self.beta_tilda)*0.05*20

    @property
    def gamma(self):
        return torch.tanh(self.gamma_tilda)*0.0294*20

    #nets
    class Net_sidr(nn.Module): # input = [t]
        """Plain 4-hidden-layer ReLU MLP: t -> (S_hat, I_hat, D_hat, R_hat)."""

        def __init__(self):
            super(DINN.Net_sidr, self).__init__()
            self.fc1=nn.Linear(1, 20) #takes 100 t's
            self.fc2=nn.Linear(20, 20)
            self.fc3=nn.Linear(20, 20)
            self.fc4=nn.Linear(20, 20)
            self.out=nn.Linear(20, 4) #outputs S, I, D, R

        def forward(self, t_batch):
            sidr=F.relu(self.fc1(t_batch))
            sidr=F.relu(self.fc2(sidr))
            sidr=F.relu(self.fc3(sidr))
            sidr=F.relu(self.fc4(sidr))
            sidr=self.out(sidr)
            return sidr

    def net_f(self, t_batch):
        """Evaluate the network plus the four SIDR ODE residuals.

        Returns (f1_hat..f4_hat, S_hat..R_hat): the normalised residual of
        each ODE and the network's normalised compartment predictions.
        """
        sidr_hat = self.net_sidr(t_batch)
        S_hat, I_hat, D_hat, R_hat = sidr_hat[:,0], sidr_hat[:,1], sidr_hat[:,2], sidr_hat[:,3]
        # Time-derivatives of each output channel via reverse-mode autodiff;
        # the one-hot matrix m_k selects a single channel, and t.grad is
        # cleared between channels so the gradients do not accumulate.
        #S_t
        sidr_hat.backward(self.m1, retain_graph=True)
        S_hat_t = self.t.grad.clone()
        self.t.grad.zero_()
        #I_t
        sidr_hat.backward(self.m2, retain_graph=True)
        I_hat_t = self.t.grad.clone()
        self.t.grad.zero_()
        #D_t
        sidr_hat.backward(self.m3, retain_graph=True)
        D_hat_t = self.t.grad.clone()
        self.t.grad.zero_()
        #R_t
        sidr_hat.backward(self.m4, retain_graph=True)
        R_hat_t = self.t.grad.clone()
        self.t.grad.zero_()
        # Map predictions back to physical units for the ODE right-hand sides.
        S = self.S_min + (self.S_max - self.S_min) * S_hat
        I = self.I_min + (self.I_max - self.I_min) * I_hat
        D = self.D_min + (self.D_max - self.D_min) * D_hat
        R = self.R_min + (self.R_max - self.R_min) * R_hat
        # SIDR residuals, rescaled into normalised space (divide by max-min).
        f1_hat = S_hat_t - (-(self.alpha / self.N) * S * I) / (self.S_max - self.S_min)
        f2_hat = I_hat_t - ((self.alpha / self.N) * S * I - self.beta * I - self.gamma * I ) / (self.I_max - self.I_min)
        f3_hat = D_hat_t - (self.gamma * I) / (self.D_max - self.D_min)
        f4_hat = R_hat_t - (self.beta * I ) / (self.R_max - self.R_min)
        return f1_hat, f2_hat, f3_hat, f4_hat, S_hat, I_hat, D_hat, R_hat

    def load(self):
        """Best-effort resume from the checkpoint file PATH + str(self.save) + '.pt'.

        A missing file is ignored silently; an architecture mismatch
        (RuntimeError from load_state_dict) is ignored with a notice.
        """
        # Load checkpoint
        try:
            checkpoint = torch.load(PATH + str(self.save)+'.pt')
            print('\nloading pre-trained model...')
            self.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            epoch = checkpoint['epoch']
            self.losses = checkpoint['losses']
        except RuntimeError :
            print('changed the architecture, ignore')
            pass
        except FileNotFoundError:
            pass

    # NOTE(review): this shadows nn.Module.train(mode=True); the usual
    # train/eval mode switch is not available on DINN instances.
    def train(self, n_epochs):
        """Full-batch training loop.

        Minimises data-misfit MSE + ODE-residual MSE; checkpoints every
        4000 epochs, alternating between two files so one intact copy
        survives an interrupted save; stops early once loss < 4e-4.
        Returns the final epoch's unnormalised predictions, each wrapped
        in a 1-element list.
        """
        #try loading
        self.load()
        #train
        print('\nstarting training...\n')
        for epoch in range(n_epochs):
            #lists to hold the output (maintain only the final epoch)
            S_pred_list = []
            I_pred_list = []
            D_pred_list = []
            R_pred_list = []
            f1, f2, f3, f4, S_pred, I_pred, D_pred, R_pred = self.net_f(self.t_batch)
            self.optimizer.zero_grad()
            # Store this epoch's predictions in physical units.
            S_pred_list.append(self.S_min + (self.S_max - self.S_min) * S_pred)
            I_pred_list.append(self.I_min + (self.I_max - self.I_min) * I_pred)
            D_pred_list.append(self.D_min + (self.D_max - self.D_min) * D_pred)
            R_pred_list.append(self.R_min + (self.R_max - self.R_min) * R_pred)
            # Loss = data misfit (4 terms) + physics residuals (4 terms).
            loss = (torch.mean(torch.square(self.S_hat - S_pred))+
                    torch.mean(torch.square(self.I_hat - I_pred))+
                    torch.mean(torch.square(self.D_hat - D_pred))+
                    torch.mean(torch.square(self.R_hat - R_pred))+
                    torch.mean(torch.square(f1))+
                    torch.mean(torch.square(f2))+
                    torch.mean(torch.square(f3))+
                    torch.mean(torch.square(f4))
                    )
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            self.losses.append(loss.item())
            if epoch % 1000 == 0:
                print('\nEpoch ', epoch)
            #loss + model parameters update
            if epoch % 4000 == 0:
                #checkpoint save
                print('\nSaving model... Loss is: ', loss)
                torch.save({
                    'epoch': epoch,
                    'model': self.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler': self.scheduler.state_dict(),
                    #'loss': loss,
                    'losses': self.losses,
                }, PATH + str(self.save)+'.pt')
                # Alternate the target file between ..._2.pt and ..._3.pt.
                if self.save % 2 > 0: #its on 3
                    self.save = 2 #change to 2
                else: #its on 2
                    self.save = 3 #change to 3
                print('epoch: ', epoch)
                print('alpha: (goal 0.191 ', self.alpha)
                print('beta: (goal 0.05 ', self.beta)
                print('gamma: (goal 0.0294 ', self.gamma)
            if loss < 4e-4:
                break
        return S_pred_list, I_pred_list, D_pred_list, R_pred_list
%%time
# Build the model from the [t, S, I, D, R] rows of the dataset.
dinn = DINN(sidr_data[0], sidr_data[1], sidr_data[2], sidr_data[3],
            sidr_data[4]) #in the form of [t,S,I,D,R]

learning_rate = 1e-5
optimizer = optim.Adam(dinn.params, lr = learning_rate)
dinn.optimizer = optimizer

# Cyclic LR between 1e-5 and 1e-3 with exponentially decaying amplitude.
scheduler = torch.optim.lr_scheduler.CyclicLR(dinn.optimizer, base_lr=1e-5, max_lr=1e-3, step_size_up=100, mode="exp_range", gamma=0.85, cycle_momentum=False)
dinn.scheduler = scheduler

# Train; on EOFError (truncated/corrupt checkpoint) switch to the sibling
# checkpoint file and retry once.
try:
    S_pred_list, I_pred_list, D_pred_list, R_pred_list = dinn.train(700000) #train
except EOFError:
    if dinn.save == 2:
        dinn.save = 3
        S_pred_list, I_pred_list, D_pred_list, R_pred_list = dinn.train(700000) #train
    elif dinn.save == 3:
        dinn.save = 2
        S_pred_list, I_pred_list, D_pred_list, R_pred_list = dinn.train(700000) #train

# Loss curve, tail only (skips the first 300k recorded epochs).
plt.plot(dinn.losses[300000:], color = 'teal')
plt.xlabel('Epochs')
plt.ylabel('Loss'),
# Overlay the ground-truth S/I/D/R curves (solid) with the DINN
# predictions (dashed) on one figure and save it to PDF.
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, facecolor='#dddddd', axisbelow=True)
ax.set_facecolor('xkcd:white')
ax.plot(sidr_data[0], sidr_data[1], 'pink', alpha=0.5, lw=2, label='Susceptible')
ax.plot(sidr_data[0], S_pred_list[0].detach().numpy(), 'red', alpha=0.9, lw=2, label='Susceptible Prediction', linestyle='dashed')
ax.plot(sidr_data[0], sidr_data[2], 'violet', alpha=0.5, lw=2, label='Infected')
ax.plot(sidr_data[0], I_pred_list[0].detach().numpy(), 'dodgerblue', alpha=0.9, lw=2, label='Infected Prediction', linestyle='dashed')
ax.plot(sidr_data[0], sidr_data[3], 'darkgreen', alpha=0.5, lw=2, label='Dead')
ax.plot(sidr_data[0], D_pred_list[0].detach().numpy(), 'green', alpha=0.9, lw=2, label='Dead Prediction', linestyle='dashed')
ax.plot(sidr_data[0], sidr_data[4], 'blue', alpha=0.5, lw=2, label='Recovered')
ax.plot(sidr_data[0], R_pred_list[0].detach().numpy(), 'teal', alpha=0.9, lw=2, label='Recovered Prediction', linestyle='dashed')
ax.set_xlabel('Time /days')
ax.set_ylabel('Number')
#ax.set_ylim([-1,50])
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='black', lw=0.2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
    ax.spines[spine].set_visible(False)
plt.savefig('itswhite.pdf')
plt.show()
#calculate relative MSE loss
# NOTE(review): despite the name, each value printed is the relative
# L2-norm error sqrt(sum((true - pred)^2) / sum(true^2)) per compartment.
import math
import numpy as np

S_total_loss = 0
S_den = 0
I_total_loss = 0
I_den = 0
D_total_loss = 0
D_den = 0
R_total_loss = 0
R_den = 0
t = np.linspace(0, 500, 100)

# Accumulate squared errors (numerator) and squared truths (denominator).
for timestep in range(len(t)):
    S_value = sidr_data[1][timestep] - S_pred_list[0].detach().numpy()[timestep]
    S_total_loss += S_value**2
    S_den += (sidr_data[1][timestep])**2
    I_value = sidr_data[2][timestep] - I_pred_list[0].detach().numpy()[timestep]
    I_total_loss += I_value**2
    I_den += (sidr_data[2][timestep])**2
    D_value = sidr_data[3][timestep] - D_pred_list[0].detach().numpy()[timestep]
    D_total_loss += D_value**2
    D_den += (sidr_data[3][timestep])**2
    R_value = sidr_data[4][timestep] - R_pred_list[0].detach().numpy()[timestep]
    R_total_loss += R_value**2
    R_den += (sidr_data[4][timestep])**2

S_total_loss = math.sqrt(S_total_loss/S_den)
I_total_loss = math.sqrt(I_total_loss/I_den)
D_total_loss = math.sqrt(D_total_loss/D_den)
R_total_loss = math.sqrt(R_total_loss/R_den)

print('S_total_loss: ', S_total_loss)
print('I_total_loss: ', I_total_loss)
print('D_total_loss: ', D_total_loss)
print('R_total_loss: ', R_total_loss)
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

# Initial conditions: one infected individual in an otherwise susceptible
# population of N.
N = 59e6
S0 = N - 1
I0 = 1
D0 = 0
R0 = 0
# A grid of time points (in days)
t = np.linspace(0, 500, 100)

# Epidemic parameters as learned by the DINN (tanh-bounded properties).
#parameters
alpha = dinn.alpha
beta = dinn.beta
gamma = dinn.gamma
# The SIDR model differential equations.
def deriv(y, t, alpha, beta, gamma, N=59e6):
    """Right-hand side of the SIDR system for scipy.integrate.odeint.

    Parameters
    ----------
    y : sequence of 4 floats, current state (S, I, D, R)
    t : float, current time (unused; required by odeint's call signature)
    alpha, beta, gamma : infection, recovery and death rate parameters
    N : population size; default 59e6 matches the global used in this notebook

    Returns (dS/dt, dI/dt, dD/dt, dR/dt).

    Fix: the original misspelled the second rate parameter as `betta`, so the
    body silently read the *global* `beta` (and global `N`) instead of its
    arguments; both are now honored (N via a backward-compatible default).
    """
    S, I, D, R = y
    dSdt = - (alpha / N) * S * I
    dIdt = (alpha / N) * S * I - beta * I - gamma * I
    dDdt = gamma * I
    dRdt = beta * I
    return dSdt, dIdt, dDdt, dRdt
# Initial conditions vector
y0 = S0, I0, D0, R0
# Integrate the SIR equations over the time grid, t.
# args are forwarded positionally into deriv's rate parameters.
ret = odeint(deriv, y0, t, args=(alpha, beta, gamma))
# Unpack the (len(t), 4) solution into one array per compartment.
S, I, D, R = ret.T
# Plot the ODE solution driven by the learned parameters (dashed) against
# the ground-truth data (solid) for all four compartments.
fig = plt.figure(facecolor='w', figsize=(12,12))
ax = fig.add_subplot(111, facecolor='#dddddd', axisbelow=True)
ax.plot(t, S, 'violet', alpha=0.5, lw=2, label='Learnable Param Susceptible', linestyle='dashed')
ax.plot(t, sidr_data[1], 'dodgerblue', alpha=0.5, lw=2, label='Susceptible')
ax.plot(t, I, 'darkgreen', alpha=0.5, lw=2, label='Learnable Param Infected', linestyle='dashed')
# Fix: this curve is sidr_data[2] (the Infected data); the original
# legend label was a copy-paste of 'Susceptible'.
ax.plot(t, sidr_data[2], 'gold', alpha=0.5, lw=2, label='Infected')
ax.plot(t, D, 'red', alpha=0.5, lw=2, label='Learnable Param Dead', linestyle='dashed')
ax.plot(t, sidr_data[3], 'salmon', alpha=0.5, lw=2, label='Dead')
ax.plot(t, R, 'blue', alpha=0.5, lw=2, label='Learnable Param Recovered', linestyle='dashed')
ax.plot(t, sidr_data[4], 'wheat', alpha=0.5, lw=2, label='Recovered')
ax.set_xlabel('Time /days')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
    ax.spines[spine].set_visible(False)
plt.show()
#calculate relative MSE loss
# NOTE(review): as above, this is the relative L2-norm error
# sqrt(sum((true - pred)^2) / sum(true^2)) for the ODE solution.
import math

S_total_loss = 0
S_den = 0
I_total_loss = 0
I_den = 0
D_total_loss = 0
D_den = 0
R_total_loss = 0
R_den = 0

# Accumulate squared errors (numerator) and squared truths (denominator).
for timestep in range(len(t)):
    S_value = sidr_data[1][timestep] - S[timestep]
    S_total_loss += S_value**2
    S_den += (sidr_data[1][timestep])**2
    I_value = sidr_data[2][timestep] - I[timestep]
    I_total_loss += I_value**2
    I_den += (sidr_data[2][timestep])**2
    D_value = sidr_data[3][timestep] - D[timestep]
    D_total_loss += D_value**2
    D_den += (sidr_data[3][timestep])**2
    R_value = sidr_data[4][timestep] - R[timestep]
    R_total_loss += R_value**2
    R_den += (sidr_data[4][timestep])**2

S_total_loss = math.sqrt(S_total_loss/S_den)
I_total_loss = math.sqrt(I_total_loss/I_den)
D_total_loss = math.sqrt(D_total_loss/D_den)
R_total_loss = math.sqrt(R_total_loss/R_den)

print('S_total_loss: ', S_total_loss)
print('I_total_loss: ', I_total_loss)
print('D_total_loss: ', D_total_loss)
print('R_total_loss: ', R_total_loss)
```
| github_jupyter |
# Derive Analytics on data using Python Jupyter Notebook, Watson Conversation & NAO Robot
### <font color='blue'> Expose an integration point using websockets for orchestration with Node-RED using Watson Conversation & NAO Robot.</font>
## 1. Setup
To prepare your environment, you need to install some packages.
### 1.1 Install the necessary packages
You need the latest versions of these packages:<br>
- websocket-client: is a python client for the Websockets.<br>
** Install the websocket client: **
```
!pip install websocket-client
!pip install python-swiftclient
```
### 1.2 Import packages and libraries
Import the packages and libraries that are needed:
```
from io import StringIO
import requests
import json
import swiftclient
import pandas as pd
import websocket
import json
from datetime import datetime
import thread
import time
import numpy as np
```
## 2. Configuration
Add configurable items of the notebook below
### 2.1 Add your service credentials for Object Storage
You must create Object Storage service on Bluemix.
To access data in a file in Object Storage, you need the Object Storage authentication credentials.
Insert the Object Storage authentication credentials as <i><b>credentials_1</b></i> in the following cell after
removing the current contents in the cell.
# 3. Persistence and Storage
### 3.1 Configure Object Storage Client
```
# Keystone v3 auth against the Object Storage instance described by the
# inserted `credentials_1` dict (see section 2.1).
auth_url = credentials_1['auth_url']+"/v3"
container = credentials_1["container"]
IBM_Objectstorage_Connection = swiftclient.Connection(
    key=credentials_1['password'], authurl=auth_url, auth_version='3', os_options={
        "project_id": credentials_1['project_id'], "user_id": credentials_1['user_id'], "region_name": credentials_1['region']})
def create_container(container_name):
    """Create a container named `container_name` on Object Storage and
    return the backend response."""
    return IBM_Objectstorage_Connection.put_container(container_name)
def put_object(container_name, fname, contents, content_type):
    """Upload `contents` as object `fname` (with the given content type)
    into `container_name` on Object Storage; return the backend response."""
    return IBM_Objectstorage_Connection.put_object(
        container_name, fname, contents, content_type)
def get_object(container_name, fname):
    """Download object `fname` from `container_name` on Object Storage and
    return its raw contents (the body of the (headers, body) response)."""
    _headers, body = IBM_Objectstorage_Connection.get_object(
        container_name, fname)
    return body
```
## 4. Global Variable
#### Add global variable
```
Data = 'Data.csv'  # object name of the dataset file inside the container
```
## 5. Read the Data & convert it into Dataframe
```
'''Reading the dataset'''
# Pull the CSV bytes out of Object Storage and parse into the global df
# that the statistics helpers below operate on.
df = pd.read_csv(StringIO(get_object(container, Data).decode('utf-8')))
df.head()
'''Rounding off the numeric columns in the dataset'''
# Round every numeric column in place; non-numeric columns are untouched.
tmp = df.select_dtypes(include=[np.number])
df.loc[:, tmp.columns] = np.round(tmp)
df.head()
```
## 6. Create Generic re-usable functions for Statistics
##### These functions can be applied to any dataset
```
def get_max_val(tuple1, tuple2, tuple3, tuple4):
    """Max of column tuple4[0] over rows of the global `df` where the three
    (column, value) pairs tuple1..tuple3 all match."""
    mask = (df[tuple1[0]] == tuple1[1]) & (df[tuple2[0]] == tuple2[1]) & (df[tuple3[0]] == tuple3[1])
    return df[mask][tuple4[0]].max()
def get_min_val(tuple1, tuple2, tuple3, tuple4):
    """Min of column tuple4[0] over rows of the global `df` where the three
    (column, value) pairs tuple1..tuple3 all match."""
    mask = (df[tuple1[0]] == tuple1[1]) & (df[tuple2[0]] == tuple2[1]) & (df[tuple3[0]] == tuple3[1])
    return df[mask][tuple4[0]].min()
def get_total_val(tuple1, tuple2, tuple3, data=None):
    '''Return the sum of column tuple3[0] over rows matching two filters.

    tuple1 and tuple2 are (column, value) pairs applied as equality filters;
    tuple3[0] names the column that is summed.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    mask = (frame[tuple1[0]] == tuple1[1]) & (frame[tuple2[0]] == tuple2[1])
    return frame.loc[mask, tuple3[0]].sum()
def get_avg_val(tuple1, tuple2, tuple3, data=None):
    '''Return the mean of column tuple3[0] over rows matching two filters.

    tuple1 and tuple2 are (column, value) pairs applied as equality filters;
    tuple3[0] names the column that is averaged.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    mask = (frame[tuple1[0]] == tuple1[1]) & (frame[tuple2[0]] == tuple2[1])
    return frame.loc[mask, tuple3[0]].mean()
def top_three(t1, t2, data=None):
    '''Return the three largest per-group sums of column t2, grouped by t1.

    Groups by column t1, sums column t2 per group, and keeps the top three
    sums in descending order (as a DataFrame).
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    totals = frame.groupby(t1)[[t2]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).head(3))
def top_new(t1, t2, t3, data=None):
    '''Return the t3 largest per-group sums of column t2, grouped by t1.

    Same as top_three but the number of rows kept (t3) is dynamic.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    totals = frame.groupby(t1)[[t2]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).head(t3))
def top_three_1(t1, t2, t3, t4, data=None):
    '''Top-t4 per-group sums of column t3 grouped by t2, after one filter.

    t1 is a (column, value) equality filter applied before grouping.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    subset = frame[frame[t1[0]] == t1[1]]
    totals = subset.groupby(t2)[[t3]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).head(t4))
def top_three_2(t1, t2, t3, t4, t5, data=None):
    '''Top-t5 per-group sums of column t4 grouped by t3, after two filters.

    t1 and t2 are (column, value) equality filters applied before grouping.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    subset = frame[(frame[t1[0]] == t1[1]) & (frame[t2[0]] == t2[1])]
    totals = subset.groupby(t3)[[t4]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).head(t5))
def Bottom_three(t1, t2, data=None):
    '''Return the three smallest per-group sums of column t2, grouped by t1.

    Groups by column t1, sums column t2 per group, sorts descending and keeps
    the last three rows (so output stays in descending order).
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    totals = frame.groupby(t1)[[t2]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).tail(3))
def Bottom_new(t1, t2, t3, data=None):
    '''Return the t3 smallest per-group sums of column t2, grouped by t1.

    Same as Bottom_three but the number of rows kept (t3) is dynamic.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    totals = frame.groupby(t1)[[t2]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).tail(t3))
def bottom_three_1(t1, t2, t3, t4, data=None):
    '''Bottom-t4 per-group sums of column t3 grouped by t2, after one filter.

    t1 is a (column, value) equality filter applied before grouping.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    subset = frame[frame[t1[0]] == t1[1]]
    totals = subset.groupby(t2)[[t3]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).tail(t4))
def bottom_three_2(t1, t2, t3, t4, t5, data=None):
    '''Bottom-t5 per-group sums of column t4 grouped by t3, after two filters.

    t1 and t2 are (column, value) equality filters applied before grouping.
    data: optional DataFrame to query; defaults to the notebook-global df.
    '''
    frame = df if data is None else data
    subset = frame[(frame[t1[0]] == t1[1]) & (frame[t2[0]] == t2[1])]
    totals = subset.groupby(t3)[[t4]].sum()
    return totals.apply(lambda col: col.sort_values(ascending=False).tail(t5))
```
## 7. Expose Integration Point with a Websocket Client
```
'''This part of the code imports the question from websocket, executes the function as per
corresponding intent and sends the output back to the websocket.'''
def on_message(ws, message):
    """Websocket message handler: answer a statistics question from the bot.

    Parses the bot payload, converts 'Year' entities from a '%Y' string to an
    integer year, dispatches to the statistics helper matching the detected
    intent, and sends {"response": {question: answer}} back over the socket.
    """
    msg = json.loads(message)
    # Normalize Year entities: '2016' -> 2016 so they compare against df values.
    for item in msg["botresponse"]["messageout"]["context"]["entities_array"]:
        if item['entity'] == 'Year':
            date = datetime.strptime(item['value'], '%Y')
            item['value'] = date.year
    print(msg)  # fixed: `print msg` is Python 2 syntax and a SyntaxError here
    entitylist = []
    for index, entity in enumerate(msg["botresponse"]["messageout"]["context"]["entities_array"]):
        print(entity, index)
        entitylist.append((entity["entity"], entity["value"]))
    contextlist = [list(e) for e in msg["botresponse"]["messageout"]["context"].items()]
    intent = msg["botresponse"]["messageout"]["context"]["intent"]
    response = None  # fallback so an unrecognized intent cannot raise NameError below
    if intent == 'summation':
        response = round(get_total_val(entitylist[1], entitylist[0], entitylist[2]), 2)
        print(response)
    elif intent == 'average':
        response = round(get_avg_val(entitylist[1], entitylist[0], entitylist[2]), 2)
        print(response)
    elif intent == 'max':
        response = round(get_max_val(entitylist[2], entitylist[1], entitylist[0], entitylist[3]), 2)
        print(response)
    elif intent == 'min':
        response = round(get_min_val(entitylist[2], entitylist[1], entitylist[0], entitylist[3]), 2)
        print(response)
    elif intent == 'top':
        response = top_new(entitylist[0][0], entitylist[2][0], entitylist[1][0]).to_json()
        print(response)
    elif intent == 'bottom':
        response = Bottom_new(entitylist[0][0], entitylist[2][0], entitylist[1][0]).to_json()
        print(response)
    Question = str(msg["botresponse"]["messageout"]["input"]["text"])
    responsejson = {}
    responsejson["response"] = {Question: response}
    ws.send(json.dumps(responsejson))
def on_error(ws, error):
    # Websocket error callback: log the error to stdout.
    print(error)
def on_close(ws):
    """Websocket close callback: tell the server the DSX listener has ended."""
    farewell = "DSX Listen End"
    ws.send(farewell)
def on_open(ws):
    """Websocket open callback: start a background heartbeat sender.

    Sends a "DSX HeartBeat" frame every 100 seconds (up to 10000 times) so the
    connection stays alive while the listener waits for questions.
    """
    import threading  # fixed: original used the Python 2-only `thread` module

    def run(*args):
        hbeat = '{"cmd":"DSX HeartBeat"}'
        for i in range(10000):
            ws.send(hbeat)
            time.sleep(100)

    # daemon=True so the heartbeat thread never keeps the process alive on exit
    threading.Thread(target=run, daemon=True).start()
def start_websocket_listener():
    # Connect to the Node-RED websocket endpoint and wire up the callbacks
    # defined above. NOTE(review): "NODERED_BASE_URL" is a literal placeholder
    # inside the URL -- it must be replaced with the real host before running.
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp("ws://NODERED_BASE_URL/ws/ws-robosocket",
                                on_message = on_message,
                                on_error = on_error,
                                on_close = on_close)
    ws.on_open = on_open
    ws.run_forever()  # blocks until the connection closes
```
# 8. Start Websocket Client
```
# Blocks here: run_forever() does not return until the socket closes.
start_websocket_listener()
```
| github_jupyter |
# Modeling Antibiotic Response Through Data Mining in the MIMIC-III Dataset
###Garrett Eickelberg with help from Kishore Anekalla, Yikuan Li, Dr. Yuan Luo, Dr. Nelson Sanchez-Pinto
the overarching contents of this notebook will be detailed in my googledoc labnotebook
https://docs.google.com/document/d/1bJcv0ZUkjVhFga9ZOLMNV6pXR9jLEMQmPhLS93njrCc/edit#
1/21/19
The PAII project version of this cohort generation is gearing up to get all patients suspected of having a bacterial infection. For these patients, we want to find all patients who had a positive BLOOD culture at least (12?) hours after their first antibiotic dose.
we will then use data up to 24 hours after this to predict culture results.
need to extract organism type, and t_sc_plus24
```
#libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import collections
import asyncio
import getpass
import re
from datetime import datetime as dt
import os,sys,re
import urllib3
import prettytable
from collections import Counter
import seaborn as sns
import random
from datetime import timedelta
from pathlib import Path
# NOTE(review): sklearn.externals.joblib is deprecated (removed in scikit-learn
# 0.23+); the modern equivalent is `import joblib`, and `cachedir` is the
# legacy argument name (now `location`).
from sklearn.externals.joblib import Memory
memory = Memory(cachedir='/tmp', verbose=0)
#@memory.cache above any def fxn.
%matplotlib inline
plt.style.use('ggplot')
# Slide-show (RISE/livereveal) display settings for this notebook.
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
    'width': 1024,
    'height': 768,
    'scroll': True,
})
%load_ext autotime
# Date stamp (DDMMYYYY) used to tag exported files.
date= '04042019'
```
#research question:
-Can we predict the organism type for pts with positive blood cultures in patients suspected of having an infection with data up to 24 hours after the culture (ie the approximate time we would receive the culture result).
#cohort criteria:
- YES- only adult patients (over 18 yo) (~n= 38,000 age >18)
- with sterile culture
- and ab start within the same 24 hours time window.
Three subsets:
– Started on antibiotics, after 48hrs, negculture, discontinue : neg/stop
- 48hrs, negculture, but continue full course 7, 10, or 14 days: neg/continue
– Positive culture, continue full course.Predict whether kept on antibiotics:
pos/continue
Infected with positive culture
Infected with negative culture (ie a false positive)
#components: data cleaning, model building
```
# note, all server information is stored in a config.py file that is present in the .gitignore
import importlib
scriptName = 'config.py'
# NOTE(review): importlib.import_module expects a dotted module name, not a
# filesystem path, and `wd` is not defined at this point -- as written this
# line would raise; the commented `import config` below is the working path.
script = importlib.import_module(wd+'/notebooks/PIPELINE/%s' % scriptName)
#import config
# Open a psycopg2 connection to the local MIMIC-III postgres database.
conn = psycopg2.connect(dbname=config.dbname, user=config.user, host=config.host, port=config.port,password=config.password)
cur=conn.cursor()
query_schema = 'SET search_path to ' + "mimiciii" + ';'
# Pre-curated antibiotic prescription table (subject/icustay + drug + dates).
ABrx = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/02082018_ABrx_updated.csv')
```
# generating patient culture/icu/hospital info
## sterile culture info
```
###commented this all out and will try to see what the patient icustay id landscape is like with just using blood cultures
#retrieving all patients with sterile culture. this was reverted because the "suspected infection criteria" needs to remain the same
##changed on 4/16/18 to include all hadm_id
# Pull every sterile-site culture event (blood/joint/urine/CSF/spinal/pleural/
# peritoneal/BAL) from microbiologyevents; org_name is NULL for negative cultures.
sterile_pt_sql = query_schema + """
SELECT row_id, subject_id, hadm_id, chartdate, charttime, spec_itemid, spec_type_desc, org_name FROM mimiciii.microbiologyevents
WHERE NOT lower(spec_type_desc) = 'fluid received in blood culture bottles' AND
NOT lower(spec_type_desc) = 'blood bag fluid'
AND (lower(spec_type_desc) LIKE '%blood%'
OR lower(spec_type_desc) LIKE '%joint%'
OR lower(spec_type_desc) LIKE '%urine%'
OR lower(spec_type_desc) LIKE '%csf%'
OR lower(spec_type_desc) LIKE '%spinal%'
OR lower(spec_type_desc) LIKE '%pleural%'
OR lower(spec_type_desc) LIKE '%peritoneal%'
OR lower(spec_type_desc) LIKE '%bal%'
OR lower(spec_type_desc) LIKE '%bronchoalveolar lavage%') --bal and bronchoalveolar lavage added 7/16/18
group by subject_id, row_id
ORDER BY count(subject_id) DESC
"""
sterile_pt_df=pd.read_sql_query(sterile_pt_sql,conn) #361711 patients with sterile culture -> 374643 with addn of bal and broncho... 7/16/18
# #retrieving all patients with sterile culture
# ##changed on 4/16/18 to include all hadm_id
# sterile_pt_sql = query_schema + """
# SELECT row_id, subject_id, hadm_id, chartdate, charttime, spec_itemid, spec_type_desc, org_name
# FROM mimiciii.microbiologyevents
# WHERE NOT lower(spec_type_desc) like 'fluid received in blood culture bottles' AND
# NOT lower(spec_type_desc) like 'blood bag fluid'
# AND (lower(spec_type_desc) LIKE '%blood%')
# group by subject_id, row_id
# ORDER BY count(subject_id) DESC
# """
# sterile_pt_df=pd.read_sql_query(sterile_pt_sql,conn) #361711 patients with sterile culture -> 374643 with addn of bal and broncho... 7/16/18
# sterile_pt_df= sterile_pt_df.drop_duplicates(['hadm_id','charttime','spec_type_desc','org_name']) #added 1/21/19 to reduce the redundancy
#a more detailed patient/admission/age data query
# Admission-level data joined with each patient's age at FIRST admission.
pt_age_admin_sql = query_schema + """
select p.subject_id, c.hadm_id, c.admittime, c.dischtime, c.admission_location, c.admission_type, p.first_admit_age, p.dob, p.gender from mimiciii.admissions c
join (SELECT
p.subject_id, p.dob, p.gender, MIN( ROUND( (cast(admittime as date) - cast(dob as date)) / 365.242,2) )
AS first_admit_age
FROM mimiciii.patients p
INNER JOIN mimiciii.admissions a
ON p.subject_id = a.subject_id
GROUP BY p.subject_id, p.dob, p.gender
ORDER BY p.subject_id) p on c.subject_id = p.subject_id
"""
pt_age_admin_df = pd.read_sql_query(pt_age_admin_sql,conn) #38637
# Restrict to adults (first-admission age > 16) and rename for clarity.
pt_age_admin_16_df= pt_age_admin_df.loc[pt_age_admin_df.loc[:,'first_admit_age']>16,:]
pt_age_admin_16_df = pt_age_admin_16_df.rename(index=str, columns={"admittime":'hosp_admit','dischtime':'hosp_disch'})
# ICU-stay-level data: one row per icustay_id.
pt_source_sql = query_schema + """
SELECT subject_id, icustay_id, hadm_id, dbsource, intime, outtime, los, FIRST_WARDID, LAST_WARDID from mimiciii.icustays
"""
pt_source_df =pd.read_sql_query(pt_source_sql,conn) #46520 patients total, no ab, culture or age restrict
pt_source_df = pt_source_df.rename(index=str, columns={"intime":'ICU_admit','outtime':'ICU_disch'})
#merging icu info with patient admiin info- has all icustay id's with patient and admission info.
pt_icu_hosp_db_16 =pd.merge(pt_age_admin_16_df,pt_source_df.drop(['subject_id'], axis=1), left_on= 'hadm_id', right_on='hadm_id', how = 'right')
# len(pt_source_df) #61532
# len(pt_age_admin_16_df)#50857
# len(pt_icu_hosp_db_16) #61532
# pt_age_admin_df
# from pathlib import Path
# os.chdir('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling') #use to change working directory
# wd= os.getcwd() #'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling'
# date= '22112018'
# pd.DataFrame(pt_age_admin_df).to_csv(Path(
# wd+'/data/processed/%s_pt_age.csv' %(date)))
```
### merging sterile_pt_df with pt_icu_hosp_db_16
- sterile_all_pt_info:
is all the hospital, icu admission, and patient data for patients >=16yo w/ a sterile site culture.
- merging dfs to concat all the hospital, icu admission, and patient data for patients >=16yo w/ a sterile site culture.
```
#merging dfs to concat all the hospital, icu admission, and patient data for patients >=16yo w/ a sterile site culture.
# Result: one row per (icustay, culture event) combination.
sterile_all_pt_info= None
#filtering and merging patient/hospital/icu admin data for pt>16 yo, with all sterile culture pt.
sterile_all_pt_info = pd.merge(
    pt_icu_hosp_db_16, sterile_pt_df.drop(['subject_id'], axis=1),
    left_on= 'hadm_id',
    right_on='hadm_id',
    how = 'inner'
)
#making all sterile culture charttime null rows = to chartdate.
sterile_all_pt_info.loc[sterile_all_pt_info.loc[:,'charttime'].isnull(),'charttime'
] = sterile_all_pt_info.loc[sterile_all_pt_info.loc[:,'charttime'].isnull(),'chartdate']
#removing all null admit ages.
sterile_all_pt_info=sterile_all_pt_info[sterile_all_pt_info['first_admit_age'].notnull()]
sterile_all_pt_info=sterile_all_pt_info.rename(
    index=str, columns={'charttime':'culture_charttime'}) #changing some column names for clarity
#07/13/18 -> 01/21/19 using just blood cultures
print(len(sterile_all_pt_info), #424493 ->237020 rows
    len(sterile_all_pt_info['hadm_id'].unique()), #34915 -> 28207
    len(sterile_all_pt_info['icustay_id'].unique()))# 38292 -> 31233
###History:
#previously we wanted to only include sterile cultures taken within icu, but we changed our mind on that.
#ptinfo_ICUsterile_16 was origionally just in ICU, while sterile_all_pt_info was in and outside icu.
#i have sense removed this and adjusted all instances to be sterile_all_pt_info. the code to make this past sheet is below:
#removed below lines because we want to include sterile cultures outside of ICU.
#annotates the sterile cultures and returns True if it's within the icu time window.
##sterile_all_pt_info['sterile_dur_icu']= (sterile_all_pt_info['ICU_disch'] >= sterile_all_pt_info['charttime']) &(sterile_all_pt_info['charttime']>=sterile_all_pt_info['ICU_admit'])
#sterile_all_pt_info_icu= sterile_all_pt_info.loc[sterile_all_pt_info.loc[:,'sterile_dur_icu']==True,:]
```
# building patient list from PRESCRIPTION table
using the prescription antibiotic list and the list of dates of ssc cultures, building patient list that annotates first ab within 24 hr of sc for each pt.
```
# Collapse to one row per icustay before merging with the antibiotics table.
sterile_all_pt_info_pt= sterile_all_pt_info.drop_duplicates('icustay_id')#38292 on 07-13-18
ABrx_ntnull= ABrx.loc[ABrx.loc[:,"icustay_id"].notnull(),:] #removing null icustay_id
ABrx_16sterile_ntnul = None
#have to convert icustay_id's to numbers to merge
sterile_all_pt_info_pt['icustay_id']= pd.to_numeric(sterile_all_pt_info_pt['icustay_id'])
ABrx_ntnull['icustay_id']= pd.to_numeric(ABrx_ntnull['icustay_id'])
#merging antibiotics table w/ all the hospital, icu admission, and patient data for patients >=16yo w/ a sterile site culture.
ABrx_16sterile_ntnul= pd.merge(
    ABrx_ntnull,
    sterile_all_pt_info_pt.drop(['subject_id','hadm_id','row_id'], axis=1),
    left_on= 'icustay_id',
    right_on='icustay_id',
    how = 'inner')
print(len(ABrx_16sterile_ntnul))
#added on 4/24/18: there are some erronious rows where start date > enddate. these are being removed.
keep_criteria1 = pd.to_datetime(ABrx_16sterile_ntnul['startdate']) <= pd.to_datetime(ABrx_16sterile_ntnul['enddate'])
ABrx_16sterile_ntnul= ABrx_16sterile_ntnul[keep_criteria1]
print(len(ABrx_16sterile_ntnul), #2521764 rows -> 111796 -> 132083 ->130646 after remove criteria
    len(ABrx_16sterile_ntnul['hadm_id'].unique()), #26701 -> 17898 -> 24282
    len(ABrx_16sterile_ntnul['icustay_id'].unique()), # 28429 ->18778 -> 26009
    len(ABrx_16sterile_ntnul['subject_id'].unique()))# 22035 -> 14903 -> 19638 #final set is after readding back the sterile culture outside of icu stay.
#->19790 after remove criteria -> 16512
len(ABrx_ntnull['icustay_id'].unique())
# Quick visual sanity check: age distribution of the merged cohort.
%pylab inline
pylab.rcParams['figure.figsize'] = (10, 6)
ABrx_16sterile_ntnul['first_admit_age'].sort_values(ascending=True).plot.hist(100)
ABrx_16sterile_ntnul.head()
```
### annotating pt who have antibiotics data in cv, mv, or both.
```
# updated 4/11/18
# Label each subject with which ICU charting system(s) recorded their
# antibiotics: carevue (cv_ab), metavision (mv_ab), or both (cv/mv_ab).
cv_pts= ABrx_16sterile_ntnul[ABrx_16sterile_ntnul['dbsource']=='carevue']['subject_id'].unique()
mv_pts= ABrx_16sterile_ntnul[ABrx_16sterile_ntnul['dbsource']=='metavision']['subject_id'].unique()
both_pts= []
for pt in ABrx_16sterile_ntnul["subject_id"].unique():
    if pt in cv_pts and pt in mv_pts:
        both_pts.append(pt)
#need to annotate in pt_db_association column
ABrx_16sterile_ntnul.loc[ABrx_16sterile_ntnul.loc[:,'dbsource']=='carevue','pt_ab_db']='cv_ab'
ABrx_16sterile_ntnul.loc[ABrx_16sterile_ntnul.loc[:,'dbsource']=='metavision','pt_ab_db']='mv_ab'
ABrx_16sterile_ntnul.loc[ABrx_16sterile_ntnul.loc[:,'dbsource']=='both','pt_ab_db']='both_marking_ab'
ABrx_16sterile_ntnul.loc[ABrx_16sterile_ntnul.loc[:,'subject_id'].isin(both_pts),'pt_ab_db']='cv/mv_ab'
# One row per subject: the earliest prescription by startdate.
ABrx_16sterile_ntnul_pts = ABrx_16sterile_ntnul.sort_values('startdate').drop_duplicates('subject_id')
ABrx_16sterile_ntnul_pts = ABrx_16sterile_ntnul_pts[['subject_id','Antibiotics','dob','gender','first_admit_age','pt_ab_db']]
#pt_ab_age_df = pd.merge(pt_sterile16_list, ABrx_merged_16sterile_ntnul_pts[['subject_id','pt_ab_db']], left_on= 'subject_id', right_on='subject_id',how = 'inner')
ABrx_16sterile_ntnul_pts['pt_ab_db'].value_counts() #total: 14903
#numbers before regenerating dataset (filtered for only culture dates within icu date windows -> pt has any sterile culture)
# cv_ab 10394 -> 9639 -> 9629 -> 8187 for only blood cultures
# mv_ab 9993 -> 9243 -> 9228 _> 7681
# cv/mv_ab 777 -> 684 -> 682 -> 576
# both_marking_ab 72 -> 72 -> 72 -> 68
# Name: pt_ab_db, dtype: int64
```
### restricting df rows to only those with 24hr associated AB and SC
- marking first ab date within 24hr of sterile site culture
- restricting to only rows where the ab's are at or after the first date (first date = first ab is within 24 hour of culture.)
- note: the t_end consec is established in the next set to determine consecutive ab days.
```
def AB_SC_associated_time_annotation(ABrx_16sterile_ntnul):
    """
    fxn: annotates antibiotic dataframe with t_0 and t_end for each patient.
    ##t0 is the first ab date in icu associated within 24 hours of sterile site culture
    ##t_end is the longest duration ab associated with t0
    input: the merged dataframe consisting of all antibiotic data, hospital, icu admission, and patient data
    for patients >=16yo w/ a sterile site culture and no null ICUstay_id's (named ABrx_16sterile_ntnul).
    output: ABrx_16sterile_ntnul annotated with t_0 and t_end for the FIRST instance of an ab being given in the icu
    within 24 hours of a sterile site culture.
    """
    #boolean criteria where delta between the AB startdate and culture_charttime is less than 24hr
    # (abs() makes the window symmetric: AB may start up to 24h before or after the culture)
    lessthan24hr= abs(pd.to_datetime(ABrx_16sterile_ntnul['startdate']) - pd.to_datetime(
        ABrx_16sterile_ntnul['culture_charttime'])) <='24:00:00'
    #column annotating the delta between the AB startdate and culture_charttime is less than 24hr
    ABrx_16sterile_ntnul['ab_culture_delta'] = (
        pd.to_datetime(ABrx_16sterile_ntnul['startdate'])
        - pd.to_datetime(ABrx_16sterile_ntnul['culture_charttime'])
    )
    #df of all rows where delta between the AB startdate and culture_charttime is less than 24hr
    ABrx_ab24hrdelta= ABrx_16sterile_ntnul.loc[lessthan24hr]
    #annotating t_0: the first ab date in icustay associated within 24 hours of sterile site culture
    ABrx_ab24hrdelta['t_0']= ABrx_ab24hrdelta.groupby('icustay_id')['startdate'].transform(lambda x: min(x))
    ABrx_16sterile_ptAB24startdate= ABrx_ab24hrdelta[['icustay_id','t_0']].drop_duplicates('icustay_id')
    # t_end: among rows that start exactly at t_0, keep the latest enddate per icustay
    # (sort enddate descending, then drop_duplicates keeps the first = latest row)
    ABrx_16sterile_ptAB24enddate= ABrx_ab24hrdelta[
        ABrx_ab24hrdelta['startdate']==ABrx_ab24hrdelta[
            't_0']].sort_values(
        ['subject_id','icustay_id','enddate'], ascending=[True,True,False]).drop_duplicates('icustay_id')
    #adding t_0 and t_end to ABrx_16sterile_ntnul by merging w/ AB24startdate and enddate above
    ABrx_16sterile_ntnul= (
        pd.merge(
            ABrx_16sterile_ntnul,
            ABrx_16sterile_ptAB24startdate,
            left_on= 'icustay_id', right_on='icustay_id', how = 'left')
    )
    ABrx_16sterile_ntnul= (
        pd.merge(
            ABrx_16sterile_ntnul,
            ABrx_16sterile_ptAB24enddate[['icustay_id','enddate']],
            left_on= 'icustay_id', right_on='icustay_id', how = 'left')
    )
    #renaming columns for ease (the second merge suffixed the two enddate columns)
    ABrx_16sterile_ntnul=(
        ABrx_16sterile_ntnul.rename(index=str, columns={'enddate_x':'enddate',"enddate_y":"t_end"})
    )
    return(ABrx_16sterile_ntnul)
def AB_SC_associated_cohort_restriction(ABrx_16sterile_ntnul):
    """
    fxn: restrict the input dataframe to only rows where the ab's are at or after the first date where an ab is within 24 hour of culture.
    input: the merged dataframe consisting of all antibiotic data, hospital, icu admission, and patient data
    for patients >=16yo w/ a sterile site culture and no null ICUstay_id's (named ABrx_16sterile_ntnul).
    output: ABrx_16sterile_ntnul (updated) annotated with t_0 and t_end for the FIRST instance of an ab being given in the icu
    within 24 hours of a sterile site culture.
    ABrx_16sterile_ntnul2 (updated) annotated with t_0 and t_end for the FIRST instance of an ab being given in the icu
    within 24 hours of a sterile site culture (same as ntnul above) AND filtered to only rows where the ab's are at or after the first date where an ab is within 24 hour of culture.
    this should be df where all ab_icustart dates are greater than or equal to the first 24 hour associated ab start date after icu admission.
    """
    # First annotate t_0/t_end, then filter to prescriptions starting at/after t_0.
    ABrx_16sterile_ntnul = AB_SC_associated_time_annotation(ABrx_16sterile_ntnul)
    ABrx_16sterile_ntnul2=(
        ABrx_16sterile_ntnul.loc[
            ABrx_16sterile_ntnul['startdate'] >= ABrx_16sterile_ntnul['t_0']]
    )
    # t_end_consec starts as t_end and is later extended by update_t_end().
    ABrx_16sterile_ntnul2['t_end_consec']=ABrx_16sterile_ntnul2['t_end']
    ABrx_16sterile_ntnul2['abduration']= (
        pd.to_datetime(ABrx_16sterile_ntnul2['enddate'])
        - pd.to_datetime(ABrx_16sterile_ntnul2['startdate'])
    )
    # NOTE(review): the result of this sort_values is discarded (not assigned,
    # no inplace=True) -- it has no effect as written.
    ABrx_16sterile_ntnul2.sort_values(['subject_id','startdate'])
    return(ABrx_16sterile_ntnul, ABrx_16sterile_ntnul2) #this should be df where all ab_icustart dates are greater than or
    #equal to the first 24 hour associated ab start date after icu admission.
# Annotate t_0/t_end and restrict to rows at/after the first qualifying AB.
ABrx_16sterile_ntnul, ABrx_16sterile_ntnul2= AB_SC_associated_cohort_restriction(ABrx_16sterile_ntnul)
ABrx_16sterile_ntnul.head()
len(ABrx_16sterile_ntnul)
len(ABrx_16sterile_ntnul2)
ABrx_16sterile_ntnul2.head()
###Algorithm to squash all dates into a timespan that covers the continuous days on any AB starting at the first ab within 24 hour of a sc
@memory.cache
def update_t_end(t):
    """One extension pass of the consecutive-antibiotic window.

    For rows whose [startdate, enddate] interval overlaps the current
    t_end_consec, push t_end_consec out to that row's enddate; then take the
    per-icustay max and recompute abduration from t_0. The caller runs this
    repeatedly (5x) so chained overlapping prescriptions keep extending the
    window until it stops growing.
    """
    # Extend t_end_consec wherever a prescription interval straddles it.
    t.loc[
        ((pd.to_datetime(t['startdate']) <= pd.to_datetime(t['t_end_consec'])) & (
            pd.to_datetime(t['enddate']) >= pd.to_datetime(t['t_end_consec']))),'t_end_consec']= t.loc[
        ((pd.to_datetime(t['startdate']) <= pd.to_datetime(t['t_end_consec'])) & (
            pd.to_datetime(t['enddate']) >= pd.to_datetime(t['t_end_consec']))),'enddate']
    # Propagate the furthest t_end_consec to every row of the same icustay.
    t['t_end_consec']= t.groupby('icustay_id')['t_end_consec'].transform(lambda x: max(x))
    t['abduration']= pd.to_datetime(t['t_end_consec'])-pd.to_datetime(t['t_0'])
    return(t)
```
### Calculate and annotate the total continuous time on ANY antibiotic during each patient’s icu stay
```
def t_end_consec_annotation(ABrx_16sterile_ntnul2):
    """
    input: df,
    designed for ABrx_16sterile_ntnul2-this should be df where all ab_icustart dates are greater than or equal to the first 24 hour associated ab start date after icu admission.
    output: ABrx_16sterile_ntnul_final: ABrx_16sterile_ntnul2 with t_start and t_end updated. each antibiotic
    ABrx_16sterile_ntnul_finalPT: final by pt spreadsheet where the first ab prescription meeting the 24hr sc window is listed.
    said another way, using the prescription antibiotic list and the list of dates of ssc cultures, building patient list that annotates first ab within 24 hr of sc for each pt.
    """
    # Columns carried forward into the final per-prescription dataframe.
    important_columns=[
        'subject_id',
        'icustay_id',
        'hadm_id', #added 8/14/18
        'drug',
        'culture_charttime',
        'ab_culture_delta',
        'startdate',
        'enddate',
        't_0',
        't_end',
        'abduration',
        't_end_consec',
        'ICU_admit',
        'ICU_disch'
    ]
    ABrx_16sterile_ntnul_final= ABrx_16sterile_ntnul2[important_columns].sort_values(
        ['subject_id',
        'hadm_id', #added 8/14/18
        'icustay_id',
        'startdate',
        'abduration'], ascending=[True,True, True, False, True]) #added another True 8/14/18
    #updating t_end consec by running my update_t_end algorithm 5 times.
    # (each pass can extend the window by one chained prescription)
    i=1
    while i<6:
        ABrx_16sterile_ntnul_final= update_t_end(ABrx_16sterile_ntnul_final)
        i+=1
    #making a final by pt spreadsheet where the first ab prescription meeting the 24hr sc window is listed
    ABrx_16sterile_ntnul_finalPT = ABrx_16sterile_ntnul_final.sort_values(
        ['hadm_id','icustay_id','startdate'], ascending=[True,True,True]).drop_duplicates('hadm_id') #changed subject_id to hadm_id on 8/14/18
    ABrx_16sterile_ntnul_finalPT = ABrx_16sterile_ntnul_finalPT.sort_values(
        ['hadm_id','icustay_id','startdate'], ascending=[True,True,True]).drop_duplicates('icustay_id') #changed subject_id to hadm_id on 8/14/18
    return(ABrx_16sterile_ntnul_final, ABrx_16sterile_ntnul_finalPT)
ABrx_16sterile_ntnul_final, ABrx_16sterile_ntnul_finalPT= t_end_consec_annotation(ABrx_16sterile_ntnul2)
#all patients with appropriate antibiotic timing in relation to sterile culture.
print(ABrx_16sterile_ntnul_finalPT['abduration'].describe())
# Histogram of total continuous antibiotic duration, converted to days.
(ABrx_16sterile_ntnul_finalPT['abduration'] / pd.Timedelta(days=1)).hist(bins = 63)#bins=xrange(min(compare_table['date delta']), 120, 1))
plt.xlabel(' time on antibiotics (days)')
plt.ylabel('# of icustay_ids');
print(ABrx_16sterile_ntnul_finalPT.loc[ABrx_16sterile_ntnul_finalPT.loc[:,'abduration']<= pd.Timedelta(days=7),'abduration'].value_counts())
print(ABrx_16sterile_ntnul_finalPT.loc[ABrx_16sterile_ntnul_finalPT.loc[:,'abduration']> pd.Timedelta(days=7),'abduration'].value_counts(bins=1))
#4/24: n=12613.
#7/6/18: n=12611
#ABrx_16sterile_ntnul_finalPT.head()
ABrx_16sterile_ntnul_finalPT.loc[:,'hadm_id'].value_counts().describe()
ABrx_16sterile_ntnul_finalPT.loc[:,'icustay_id'].value_counts().describe()
```
# adding in DOD
we realized later on that we need dod in our spreadsheet. below adds it to our final patient set.
```
#task 3, dod
#issue: there is a differential on 155 ssd's.
#(DOD_HOSP) or the social security database (DOD_SSN).
list_pt =ABrx_16sterile_ntnul_finalPT['subject_id'].unique()
# NOTE(review): interpolating tuple(list_pt) into the SQL string breaks if the
# list has exactly one element (trailing comma, e.g. "(123,)") -- and string
# formatting of SQL is fragile in general; a parameterized IN clause is safer.
dod_sql = query_schema + """
SELECT subject_id, DOD_HOSP, DOD_SSN
from mimiciii.patients
WHERE subject_id in {}
""".format(tuple(list_pt))
dod_df = pd.read_sql_query(dod_sql,conn)
# Disagreement between hospital-recorded and SSN-recorded date of death.
dod_df['delta']= dod_df['dod_hosp']- dod_df['dod_ssn']
#dod_df.loc[dod_df.loc[:,'delta'].notnull()!=timedelta(days=0),:]
dod_df2= dod_df[dod_df['delta'].notnull()]
dod_df2.loc[dod_df2.loc[:,'delta']!= timedelta(days=0),:]
# categorize pt into ab bins
#task 4, categorize every patient into ab bins
# group 1= partial= is antibiotics for 3 days or less
# group 2= full= is antibiotics for 5 or more days or died during ab course
ABrx_16sterile_ntnul_finalPT = pd.merge(
    ABrx_16sterile_ntnul_finalPT,
    dod_df[['subject_id','dod_hosp','dod_ssn']],
    left_on= 'subject_id',
    right_on='subject_id',
    how = 'left')
ABrx_16sterile_ntnul_finalPT['ab_course']=''
ABrx_16sterile_ntnul_finalPT.loc[
    ABrx_16sterile_ntnul_finalPT.loc[:,'abduration'] <= pd.Timedelta(days=4),'ab_course']='partial' #days changed from 3 to 4 on 7/16/18 based on Nelson's suggestion
ABrx_16sterile_ntnul_finalPT.loc[
    ABrx_16sterile_ntnul_finalPT.loc[:,'abduration'] >= pd.Timedelta(days=5),'ab_course']='full'
# Death during (or within a day of) the antibiotic window also counts as full.
ABrx_16sterile_ntnul_finalPT.loc[
    ((pd.to_datetime(ABrx_16sterile_ntnul_finalPT['t_end_consec'])+ pd.DateOffset(1))
    >= pd.to_datetime(ABrx_16sterile_ntnul_finalPT['dod_hosp'])),
    'ab_course']='full'
ABrx_16sterile_ntnul_finalPT.loc[
    ((pd.to_datetime(ABrx_16sterile_ntnul_finalPT['t_end_consec'])+ pd.DateOffset(1))
    >= pd.to_datetime(ABrx_16sterile_ntnul_finalPT['dod_ssn'])),
    'ab_course']='full'
print(len(
    ABrx_16sterile_ntnul_finalPT.loc[
        ABrx_16sterile_ntnul_finalPT.loc[:,'ab_course']=='full','subject_id'].unique())) #5485 full patients -> 6188 full on 7/6
print(len(
    ABrx_16sterile_ntnul_finalPT.loc[
        ABrx_16sterile_ntnul_finalPT.loc[:,'ab_course']=='partial','subject_id'].unique())) #5949 partial patients -> 5305 partial on 7/6
print(
    ABrx_16sterile_ntnul_finalPT.loc[
        ABrx_16sterile_ntnul_finalPT.loc[:,'abduration']<= pd.Timedelta(days=7),'abduration'].value_counts())
print(
    ABrx_16sterile_ntnul_finalPT.loc[
        ABrx_16sterile_ntnul_finalPT.loc[:,'abduration']>= pd.Timedelta(days=8),'abduration'].value_counts(bins=1))
##not sure the purpose of this:
#ABrx_16sterile_ntnul_finalPT.loc[ABrx_16sterile_ntnul_finalPT.loc[:,'abduration']== pd.Timedelta(days=0),:]
ABrx_16sterile_ntnul_finalPT['icustay_id'].nunique() #x12611 subject id's, x12611 icustay id's ->13763 icustay_id
#ABrx_16sterile_ntnul_finalPT['subject_id'].nunique() #11557 subject_id
```
# Extracting BLOOD Culture results
### accounting for all sterile cultures taken within 72 hours of the first ab.
4/22/18 (overhauled on 1/25/19)
we now have a by pt spreadsheet where the first ab prescription meeting the 24hr sc window is listed. these are only for ab's that have a ssc within 24 hours of starting.
for each patient we have the ab start date (t_0) and the end of the consecutive ab time window (t_end)
```
# NOTE(review): the defaults below are bound at definition time to the global
# dataframes that exist when this cell runs; rerunning earlier cells does not
# refresh them.
def SSC_for_cohort(final_abdata_pt_df=ABrx_16sterile_ntnul_finalPT,
                   ssc_sql=sterile_pt_df,
                   blood_only=False,
                   n_days=3,
                   n_day_column='t_0_sc',
                   first_ssc_after_ICU=False,
                   filter_t0_after_icu=False,
                   n_filter_days=4):
    """
    input: ABrx_16sterile_ntnul_finalPT: final by pt spreadsheet where the first ab prescription meeting the 24hr sc window is listed.
    said another way, using the prescription antibiotic list and the list of dates of ssc cultures, building patient list that annotates first ab within 24 hr of sc for each pt.
    n_days
    sterile_pt_df: the primary sql query df for ssc info from mimic.
    blood_only: option to restrict to only blood cultures
    n_days, n_day_column: origionally the criteria was to find cultures within 3 days window of t_0 and t_0_sc, if any were pos then patient was culture pos, else negative. we can sepcify column and day window if we want to look from ICU_admit instead.
    first_ssc_after_ICU: allows option to restrict the culture result output to only those after ICU admission.
    outputs:
    all_cultures_pt_df_ab_cohort: all (neg and pos) culture results for patients meeting criteria
    notneg_cultures_pt_df_ab_cohort: just positive culture results for patients meeting criteria
    positive_organisms: all positive organisms found in ssc instances between t_0_sc and n_day_column.
    """
    #option criteria1: to restrict to only blood cultures
    if blood_only == True:
        ssc_sql=(ssc_sql.loc[ssc_sql.loc[:,'spec_type_desc']
                 .isin(["BLOOD CULTURE",
                        'BLOOD CULTURE ( MYCO/F LYTIC BOTTLE)'])
                 ,:])
    ab_cohort_pt_info = final_abdata_pt_df[['hadm_id','culture_charttime','t_0','ICU_admit','dod_ssn']] #culture charttime is the culture recorded as being within 24 hr of t_0
    ##optional criteria2: only patients who started their ab (t_0) within n_filter_days of icu_admit? -this is to limit to non-2dary infections.
    if filter_t0_after_icu == True:
        n_filter_days= pd.Timedelta('%s days' %(n_filter_days))
        ab_cohort_pt_info['t0_admit_delta']=pd.to_datetime(ab_cohort_pt_info['t_0']) - pd.to_datetime((ab_cohort_pt_info['ICU_admit']))
        ab_cohort_pt_info = ab_cohort_pt_info[ab_cohort_pt_info['t0_admit_delta']<= n_filter_days]
    # Attach cohort timing columns to every culture row for those admissions.
    sterile_pt_df_ab_cohort = pd.merge(ssc_sql,
                                       ab_cohort_pt_info ,
                                       left_on='hadm_id',
                                       right_on='hadm_id',
                                       how='inner' ) #adding ['hadm_id','culture_charttime','t_0','ICU_admit','dod_ssn'] onto the culture results for all patients
    #making the Null charttime values = to chartdate
    cd= sterile_pt_df_ab_cohort.loc[sterile_pt_df_ab_cohort.loc[:,'charttime'].isna(),"chartdate"]
    sterile_pt_df_ab_cohort.loc[sterile_pt_df_ab_cohort.loc[:,'charttime'].isna(),"charttime"]= cd
    sterile_pt_df_ab_cohort= sterile_pt_df_ab_cohort.rename(columns={'culture_charttime':'t_0_sc'})
    #ensuring columns are in datetime format
    sterile_pt_df_ab_cohort['t_0_sc']= pd.to_datetime(sterile_pt_df_ab_cohort['t_0_sc'])
    sterile_pt_df_ab_cohort['charttime']= pd.to_datetime(sterile_pt_df_ab_cohort['charttime'])
    sterile_pt_df_ab_cohort['ICU_admit']= pd.to_datetime(sterile_pt_df_ab_cohort['ICU_admit'])
    #sterile_pt_df_ab_cohort.groupby('hadm_id')['charttime'].transform(lambda x: min(x))#was used to check that culture_charttime is indeed first sc ab within 24 hr of t_0.
    ####filtering criteria for all SSC results:######
    #optional criteria3: limiting to only rows where ssc was performed after ICU admit.
    if first_ssc_after_ICU ==True:
        sterile_pt_df_ab_cohort['ssc_admit_delta']= pd.to_datetime(sterile_pt_df_ab_cohort['charttime']) -pd.to_datetime(sterile_pt_df_ab_cohort['ICU_admit'])
        sterile_pt_df_ab_cohort= sterile_pt_df_ab_cohort[sterile_pt_df_ab_cohort['ssc_admit_delta']>pd.Timedelta('0 days')] #only rows where culture occured in icu.
    #criteria1: only results at or after the first positive within 24 hr of t_0
    criteria1= sterile_pt_df_ab_cohort['charttime']>=sterile_pt_df_ab_cohort['t_0_sc'] #t_0_scis the first ssc within 24 hr of t_0
    #criteria2: only results within n_days of n_day_column
    t_window= pd.Timedelta('%s days' %(n_days))
    criteria2= (sterile_pt_df_ab_cohort['charttime'])<=(sterile_pt_df_ab_cohort[n_day_column]+ t_window) #ensure it's within n_days of n_day_column (default = culture_chartime, but can switch to t_admit)
    all_cultures_pt_df_ab_cohort = sterile_pt_df_ab_cohort[criteria1 & criteria2].drop_duplicates(['hadm_id', 'org_name', 'charttime'])
    # Positive cultures are the rows where an organism was recorded.
    notneg_cultures_pt_df_ab_cohort= all_cultures_pt_df_ab_cohort[all_cultures_pt_df_ab_cohort['org_name'].notnull()].sort_values(['hadm_id','charttime'], ascending=[True, False]) #16022 rows... 6619 hadm_id
    positive_organisms= notneg_cultures_pt_df_ab_cohort['org_name'].value_counts()
    final_pt_df= all_cultures_pt_df_ab_cohort[['subject_id','hadm_id','t_0_sc']]
    return(all_cultures_pt_df_ab_cohort,notneg_cultures_pt_df_ab_cohort,positive_organisms, final_pt_df)
# #for origional cohort:
# all_cultures_pt_df_ab_cohort, notneg_cultures_pt_df_ab_cohort, positive_organisms, final_pt_df1 = SSC_for_cohort(final_abdata_pt_df=ABrx_16sterile_ntnul_finalPT,
# ssc_sql=sterile_pt_df,
# blood_only=False,
# n_days=3,
# n_day_column='t_0_sc',
# first_ssc_after_ICU=False)
# #for testing:
# all_cultures_pt_df_ab_cohort, notneg_cultures_pt_df_ab_cohort, positive_organisms, final_pt_df1 = SSC_for_cohort(final_abdata_pt_df=ABrx_16sterile_ntnul_finalPT,
# ssc_sql=sterile_pt_df,
# blood_only=False,
# n_days=3,
# n_day_column='t_0_sc',
# first_ssc_after_ICU=True)
#for PAII cohort:
# Build the PAII cohort culture tables.
# Options used here (see SSC_for_cohort): blood cultures only; only cultures
# charted after ICU admission; and only patients whose t_0 (antibiotic start)
# fell within 4 days of ICU admission. n_days=3 keeps results within 72 h of
# the first qualifying sterile-site culture (t_0_sc).
all_cultures_pt_df_ab_cohort, notneg_cultures_pt_df_ab_cohort, positive_organisms, final_pt_df1 = SSC_for_cohort(final_abdata_pt_df=ABrx_16sterile_ntnul_finalPT,
ssc_sql=sterile_pt_df,
blood_only=True,
n_days=3,
n_day_column='t_0_sc',
first_ssc_after_ICU=True,
filter_t0_after_icu=True,
n_filter_days=4)
# Cohort-size check (bare expression displays in the notebook).
all_cultures_pt_df_ab_cohort['hadm_id'].nunique() #15203 -> 12388 if we choose only after ICU admit
#9129-> 8731 with icu delta filter
```
From what I hear you saying, we could look at the first positive infection culture after the "suspected infection" clock starts (t_0) up through 72 hours (t_72). for each patient, we find their positive_ssc_t0 and collect data between t_0 and positive_ssc_t0+72. the idea behind this is that we want to use the data starting from the point in time they are suspected of having an infection (ie when ab start) up through 12 hours after they get their first positive culture taken. is this correct?
* no collect data from culture start to
```
#big question to answer, all sterile cultures within 48 hours of what? i am going to set timeframe for each patient:
#look at all cultures between the first SC within 24 hour of ab and up to 48 hours after first ab dose in ICU.
#this means the max i will cover is 72 hours.
# Quick sanity checks: column names and distinct-patient count of the
# antibiotic + sterile-site-culture table.
list(ABrx_16sterile_ntnul) # lists the dataframe's column names
ABrx_16sterile_ntnul['subject_id'].nunique() #19000 ->16512 1/21/19
###historical record, replaced with SSC_for_cohort. which has more simple and interpretable code.
# def pt_sc_results(ABrx_16sterile_ntnul):
# """
# input: ABrx_16sterile_ntnul (updated) annotated with t_0 and t_end for the FIRST instance of an ab being given in the icu
# within 24 hours of a sterile site culture.
# outputs:
# patients_sc_dates:
# final_pts_window_sc: all sterile site culture instances between t_0_sc and t_end_sc for patients
# positive_organisms: all positive organisms found in ssc instances between t_0_sc and t_end_sc.
# """
# #filtering to all rows where the patient has a ab and ssc within 24 hr.
# lessthan24hr= abs(pd.to_datetime(ABrx_16sterile_ntnul['startdate']) - pd.to_datetime(
# ABrx_16sterile_ntnul['culture_charttime'])) <='24:00:00'
# ABrx_ab24hrdelta= ABrx_16sterile_ntnul.loc[lessthan24hr]
# ABrx_ab24hrdelta['t_0_sc']= ABrx_ab24hrdelta.groupby('icustay_id')['culture_charttime'].transform(lambda x: min(x))
# ABrx_ab24hrdelta['t_end_sc']= pd.to_datetime(ABrx_ab24hrdelta['t_0']) + timedelta(days=3)
# #filtering it to only essential info, and only for patients who made it through all other criteria.
# patients_sc_dates= ABrx_ab24hrdelta[['subject_id','icustay_id','hadm_id','t_0_sc','t_end_sc']].drop_duplicates('icustay_id')
# #filtering to only pt in ABrx_16sterile_ntnul_finalPT
# patients_sc_dates= patients_sc_dates.loc[
# patients_sc_dates.loc[:,'icustay_id'].isin(
# list(ABrx_16sterile_ntnul_finalPT['icustay_id'].unique())),:
# ]
# final_pts_all_sc= sterile_pt_df.loc[sterile_pt_df.loc[:,'hadm_id'].isin(list(patients_sc_dates['hadm_id'].unique())),:]
# final_pts_all_sc= pd.merge(final_pts_all_sc,patients_sc_dates[['hadm_id','t_0_sc','t_end_sc']],
# left_on= 'hadm_id',
# right_on='hadm_id',
# how = 'inner')
# #pd.to_datetime(ABrx_16sterile_ntnul['startdate']) - pd.to_datetime(ABrx_16sterile_ntnul['culture_charttime'])
# #all blood culture instances between t_0_sc and t_end_sc for patients
# final_pts_window_sc= final_pts_all_sc[(final_pts_all_sc['charttime']>=final_pts_all_sc['t_0_sc']) &
# (final_pts_all_sc['charttime']<=final_pts_all_sc['t_end_sc']) &
# final_pts_all_sc['org_name'].notnull()]
# final_pts_window_sc= final_pts_window_sc.drop_duplicates(['hadm_id', 'org_name', 'charttime'])
# positive_organisms= final_pts_all_sc[(final_pts_all_sc['charttime']>=final_pts_all_sc['t_0_sc']) &
# (final_pts_all_sc['charttime']<=final_pts_all_sc['t_end_sc']) &
# final_pts_all_sc['org_name'].notnull()]['org_name'].value_counts()
# return(patients_sc_dates, final_pts_window_sc, positive_organisms)
#patients_sc_dates, final_pts_window_sc, positive_organisms = pt_sc_results(ABrx_16sterile_ntnul)
#final_pts_window_sc #~3000 with only blood, 8582 with all
# print(final_pts_window_sc['hadm_id'].nunique()) #wow only 2107->2156 hadm_id's within t_0 and t_42->t_72 a positive blood culture?
###grr wtf, somehow my n went down to 2074 even after expanding criteria to be t_0 include all sc, and just find positive BLOOD cultures.
###how does thsi shit happen?
#print(len(patients_sc_dates), len(final_pts_window_sc), len(positive_organisms))
```
# SC culture filtering and ab death dose adjust
### import the annotated culture list, exclude the excludes, and get an # of positives for the staphs.
08/16/18 QC: after changing the patient criteria, I found that a lot of patients with multiple ICU stays now have identical culture results. Need to explore where this is occurring.
```
# Spot-check one admission's culture rows before annotating.
notneg_cultures_pt_df_ab_cohort.loc[notneg_cultures_pt_df_ab_cohort.loc[:,'hadm_id']==169392, :] #13
#all sterile site culture instances between t_0_sc and t_end_sc for patients annotated.
#need further processing to make a by patient +/- within t_0_sc and t_end_sc call.
##details: all staph infections require 2 positive cultures in time window.
# Manually annotated organism list (machine-specific absolute path; update
# when running on another machine).
annotated_sc = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/processed/positive_organisms_4-24-18_NSPComments.csv')
#organisms that grew in sc in our patients. annotation key ('Eliminate' column):
#1= eliminate
#0= keep
#2= requires 2 positives in same patient within the timewindow.
# Left-join the 'Eliminate' annotation onto every culture row by organism name.
notneg_cultures_pt_df_ab_cohort= pd.merge(notneg_cultures_pt_df_ab_cohort,annotated_sc[['org_name','Eliminate']],
left_on= 'org_name',
right_on='org_name',
how = 'left')
#final_pts_window_sc
# Spot-check a second admission after the merge.
notneg_cultures_pt_df_ab_cohort[notneg_cultures_pt_df_ab_cohort['hadm_id']==112218]
# staph_count[staph_count['hadm_id']==112218]
def sc_pos_filter(notneg_cultures_pt_df_ab_cohort):
    """Reduce the annotated culture rows to those considered truly positive.

    Annotation key ('Eliminate' column): 0 = keep outright; 1 = likely
    contamination, drop; 2 = counts as positive only when the same hadm_id
    has more than one such culture in the window (coag-neg staph rule).
    Returns the kept rows with a per-admission 'staph_count' column merged on.
    """
    # Count the 'requires two positives' (Eliminate==2) cultures per admission,
    # after dropping exact (hadm_id, org_name, charttime) duplicates.
    requires_two = (
        notneg_cultures_pt_df_ab_cohort[
            notneg_cultures_pt_df_ab_cohort['Eliminate'] == 2
        ].drop_duplicates(['hadm_id', 'org_name', 'charttime'])
    )
    per_admission = (
        requires_two.groupby('hadm_id')['hadm_id']
        .agg(["count"])
        .rename(columns={'count': 'staph_count'})
    )
    per_admission = pd.DataFrame(per_admission).reset_index()

    # Annotate every culture row with its admission's staph count
    # (NaN for admissions without any Eliminate==2 cultures).
    annotated = pd.merge(
        notneg_cultures_pt_df_ab_cohort,
        per_admission,
        left_on='hadm_id',  # keyed on hadm_id (changed from subject_id 08/16/2018)
        right_on='hadm_id',
        how='left',
    )

    # Keep rows that are unconditionally positive OR pass the two-staph rule,
    # then drop any remaining likely-contamination rows (Eliminate==1).
    keep_always = annotated.loc[:, 'Eliminate'] == 0
    keep_staph = annotated.loc[:, 'staph_count'] > 1
    annotated = annotated[keep_always | keep_staph]
    annotated = annotated[annotated['Eliminate'] != 1]
    return annotated
# Apply the positivity filter, de-duplicate, and collect the positive hadm_ids.
pos_cultures_pt_df_ab_cohort = sc_pos_filter(notneg_cultures_pt_df_ab_cohort) #31108 #all pos sc for patients with any positives.
pos_cultures_pt_df_ab_cohort= pos_cultures_pt_df_ab_cohort.drop_duplicates(['hadm_id', 'org_name', 'charttime']) #removing duplicates, not sure why dups exist
pos_cultures_pt_df_ab_cohort_list= list(pos_cultures_pt_df_ab_cohort['hadm_id'].unique()) #changed to hadm_id 08/16/2018
# Display the first rows sorted for a visual check.
pos_cultures_pt_df_ab_cohort.sort_values(['hadm_id', 'org_name', 'charttime']).head(10)
```
### Gram +/- categorization
```
###gram_categorizing +/-
# Export the organism frequency table for manual gram +/- annotation, then
# read the annotated version back in and merge it onto the positive cultures.
gram_categorizing= pd.DataFrame(notneg_cultures_pt_df_ab_cohort['org_name'].value_counts().reset_index())
date='21012019'
# Machine-specific absolute paths; update when running elsewhere.
pd.DataFrame(gram_categorizing).to_csv("/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_gram_categorizing.csv" %date) #final cohort database n=11493 subject_id’s (7/6/18)
annotated_gram = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/processed/21012019_gram_categorizing-nsp.csv')
#organisms that grew in sc in our patients. annotation key ('Gram_pos' column):
#1= gram +
#0= gram -
#2= mycobacterium or other
# Left-join the gram annotation onto each positive culture row by organism.
pos_cultures_pt_df_ab_cohort= pd.merge(pos_cultures_pt_df_ab_cohort,annotated_gram[['org_name','Gram_pos']],
left_on= 'org_name',
right_on='org_name',
how = 'left')
#notneg_cultures_pt_df_ab_cohort.drop(i)
#final_pts_window_pos_sc.loc[final_pts_window_pos_sc.loc[:,'staph_count']>2,:]
#len(final_pts_window_pos_sc) #removed duplicate ['hadm_id', 'org_name', 'charttime'] =2848
len(pos_cultures_pt_df_ab_cohort.drop_duplicates(['hadm_id', 'org_name', 'charttime']))
# Per-admission min of Gram_pos (0 = gram negative wins if mixed results).
pos_cultures_pt_df_ab_cohort.groupby('hadm_id')['Gram_pos'].apply(lambda x: min(x)).reset_index()
```
### make list of all patients positive, negative sc cultures
```
#5/2/18, makes per-admission summaries of positive culture results and
#specimen types, for annotation onto the final patient dataframe.
#organism name: min Gram_pos per admission, then comma-joined organism list.
pos_cultures_pt_df_ab_cohort2 = pos_cultures_pt_df_ab_cohort.groupby('hadm_id')['Gram_pos'].apply(lambda x: min(x)).reset_index()
pos_cultures_pt_df_ab_cohort3 = pos_cultures_pt_df_ab_cohort.groupby('hadm_id')['org_name'].apply(', '.join).reset_index() #changed to hadm_id from subject_id 08/16/2018
pos_cultures_pt_df_ab_cohort3 = pos_cultures_pt_df_ab_cohort3.rename(index=str, columns={'org_name':'org_list'})
#specimen type: comma-joined list per admission.
pos_cultures_pt_df_ab_cohort4 = pos_cultures_pt_df_ab_cohort.groupby('hadm_id')['spec_type_desc'].apply(', '.join).reset_index() #changed to hadm_id from subject_id 08/16/2018
pos_cultures_pt_df_ab_cohort4 = pos_cultures_pt_df_ab_cohort4.rename(index=str, columns={'spec_type_desc':'spec_type_list'})
#now have a column with all positive culture results and specimen types
#first dates: earliest positive culture within the criteria window if one
#exists, otherwise the earliest culture of any result within the window.
first_criteria_pos_culture= pos_cultures_pt_df_ab_cohort.groupby('hadm_id')['charttime'].apply(lambda x: min(x)).reset_index()
first_criteria_any_culture= all_cultures_pt_df_ab_cohort.groupby('hadm_id')['charttime'].apply(lambda x: min(x)).reset_index()
first_criteria_pos_culture= first_criteria_pos_culture.rename(index=str, columns={'charttime':'pos_charttime'})
first_criteria_any_culture= pd.merge(first_criteria_any_culture, first_criteria_pos_culture, left_on='hadm_id', right_on='hadm_id', how='left')
# Start from the positive-culture time, then backfill admissions with no
# positive culture (NaN pos_charttime) using their earliest any-result time.
first_criteria_any_culture['first_pos_else_neg_ssc']= first_criteria_any_culture['pos_charttime']
neg_else = first_criteria_any_culture['first_pos_else_neg_ssc'].isnull()
first_criteria_any_culture.loc[neg_else, 'first_pos_else_neg_ssc'] = first_criteria_any_culture.loc[neg_else, 'charttime']
first_criteria_any_culture=first_criteria_any_culture[['hadm_id','first_pos_else_neg_ssc']]
```
# FINAL cohort generation
08/16/18 QC: after changing the patient criteria, I found that a lot of patients with multiple ICU stays now have identical culture results. Need to explore where this is occurring.
1) make final ab categorizations
2) make final patient list with ab, sterile site culture results and final binning
## this is final patient dataframe with n patients with following criteria:
* antibiotic in PRESCRIPTION table within 24 hours of an sterile site culture in MICROBIOLOGYEVENTS table
* who are over the age of 16. the culture result (annotated below) and antibiotic course (annotated below) are
* also annotated on this. these patients were then categorized into 4 categories:
* 'C_neg/A_partial'
* 'C_neg/A_full'
* 'C_pos/A_full'
* 'C_pos/A_partial'
#### notes:
* 1)i considered any patient who had their last antibiotic date at or after either
#hospital or ssn DOD as "full" ab course.
* 2) i considered any patients who had the listed staph infections positive at
#any of their sterile site cultures within the 72hour ab timewindow as a culture 'positive' patients.
* 3) time window for positive culture is defined in SSC_for_cohort (default 72hr from t_0_sc)
##### pd.DataFrame(final_pt_df2).to_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/06072018_final_pt_df2.csv') #date may change
```
#4/30/18: ASSEMBLING A FINAL PT LIST.
#final_pts_window_pos_sc_list= list(final_pts_window_pos_sc['subject_id'].unique())
# Assemble the final patient dataframe: start from the cohort patient list and
# left-join antibiotic course info, date-of-death, and the per-admission
# culture summaries built above.
final_pt_df=0 # placeholder; immediately overwritten below
final_pt_df = final_pt_df1#all_cultures_pt_df_ab_cohort[['subject_id','hadm_id','t_0_sc']] #needs icustay_id
# Antibiotic course, t_0/t_end, and ICU admit time per admission.
final_pt_df = pd.merge(
final_pt_df,
ABrx_16sterile_ntnul_finalPT[['hadm_id','icustay_id','ab_course', 't_0','t_end_consec','ICU_admit']], #testing the ICU_admit with this
left_on= 'hadm_id', #changed from subject_id
right_on='hadm_id',
how = 'left')
# Date-of-death table, keyed by subject (patient), not admission.
final_pt_df = pd.merge(
final_pt_df,
dod_df2,
left_on= 'subject_id',
right_on='subject_id',
how = 'left')
# Per-admission comma-joined organism list (org_list).
final_pt_df = pd.merge(
final_pt_df,
pos_cultures_pt_df_ab_cohort3,
left_on= 'hadm_id', #changed to hadm_id from subject_id 08/16/18
right_on='hadm_id', #changed to hadm_id from subject_id 08/16/18
how = 'left')
# Per-admission comma-joined specimen type list (spec_type_list).
final_pt_df = pd.merge(
final_pt_df,
pos_cultures_pt_df_ab_cohort4,
left_on= 'hadm_id', #changed to hadm_id from subject_id 08/16/18
right_on='hadm_id', #changed to hadm_id from subject_id 08/16/18
how = 'left')
# Per-admission min Gram_pos annotation.
final_pt_df = pd.merge(
final_pt_df,
pos_cultures_pt_df_ab_cohort2,
left_on= 'hadm_id', #changed to hadm_id from subject_id 08/16/18
right_on='hadm_id', #changed to hadm_id from subject_id 08/16/18
how = 'left')
# First positive (else first any) culture time within the criteria window.
final_pt_df = pd.merge(
final_pt_df,
first_criteria_any_culture,
left_on= 'hadm_id', #changed to hadm_id from subject_id 08/16/18
right_on='hadm_id', #changed to hadm_id from subject_id 08/16/18
how = 'left')
# Label each patient's culture result and assign one of the four final bins
# (culture pos/neg x antibiotic course full/partial).
final_pt_df= final_pt_df.rename(index=str, columns={'delta':'dod_delta'})
final_pt_df['sc_result']=''
final_pt_df['final_bin']=''
#task 1, marking of all patients positive, negative sc cultures
final_pt_df.loc[final_pt_df.loc[:,'hadm_id'].isin(pos_cultures_pt_df_ab_cohort_list),'sc_result']='positive' #changed to hadm_id from subject_id 08/16/18
# Everything not marked positive above defaults to negative.
final_pt_df.loc[final_pt_df.loc[:,'sc_result']!='positive','sc_result']='negative'
#1 is positive, 0 is negative
#FINAL_PT_LIST=list(final_pt_df['subject_id'].unique())
# Four-way binning on (ab_course, sc_result).
final_pt_df.loc[(final_pt_df['ab_course']=='partial')
& (final_pt_df['sc_result']=='negative'),'final_bin']='C_neg/A_partial'
final_pt_df.loc[(final_pt_df['ab_course']=='full')
& (final_pt_df['sc_result']=='negative'),'final_bin']='C_neg/A_full'
final_pt_df.loc[(final_pt_df['ab_course']=='full')
& (final_pt_df['sc_result']=='positive'),'final_bin']='C_pos/A_full'
final_pt_df.loc[(final_pt_df['ab_course']=='partial')
& (final_pt_df['sc_result']=='positive'),'final_bin']='C_pos/A_partial'
#len(list(final_pts_window_pos_sc['subject_id'].unique())) #2630
final_pt_df
# Keep only rows with an ab_course label and one row per admission.
final_pt_df2= final_pt_df.loc[final_pt_df.loc[:,'ab_course']!='',:].drop_duplicates('hadm_id')
# Summary counts for the final cohort.
final_pt_df2.loc[final_pt_df2.loc[:,'sc_result']=='positive','Gram_pos'].value_counts()
print(final_pt_df2['icustay_id'].nunique(), final_pt_df2['subject_id'].nunique())
print(final_pt_df2['final_bin'].value_counts())#11493
#4921+3539+481+188
#(481+188)/9129 ##7.3%
n2= 4688 +3422 + 439+ 182
(439+182)/n2 ##7.1%
final_pt_df2['sc_result'].value_counts() # 8460 669
final_pt_df2['Gram_pos'].value_counts()
```
#prior to 72 hour change
- C_neg/A_full 6489 -> 7041 (all culture for pt, only blood for sc results) / 6000 (all cultures)
- C_neg/A_partial 5788 -> 6663 / 5546
- C_pos/A_full 1008 -> 1010 / 2127
- C_pos/A_partial 478 -> 493 / 1534
```
final_pt_df2
```
# assessing t_0, t_0_sc & icuadmit
```
# final_pt_df3= final_pt_df2[final_pt_df2['sc_result']=='positive']
# final_pt_df3
# #t_0_sc is the first culture within 24 hours to t_0. how often is this the first positive cultur date?
# ###################
# #final_pts_window_pos_sc.head()
# first_pos_sc= final_pts_window_pos_sc.groupby('hadm_id')['charttime'].agg(['min']).rename(columns={'min': 'first_pos_ssc'}).reset_index()
# #pd.Dafirst_pos_sc
# test_df= pd.merge(final_pt_df3,first_pos_sc)
# #test_df.head()
# delta= test_df['first_pos_ssc']-test_df['t_0_sc'] #
# delta[delta!='0 days 00:00:00'].describe()
# delta.describe()
# ##good news is first positive ssc is never sooner than t_0_sc.
# #looking at all sc chartdates vs t_0 for all pt
# #final_pts_window_pos_sc
# all_sc_and_t0=pd.merge(final_pts_window_pos_sc,
# ABrx_16sterile_ntnul_finalPT[['hadm_id','ab_course', 't_0','t_end_consec','ICU_admit']],
# left_on='hadm_id',
# right_on='hadm_id',
# how='left')
# all_sc_and_t0['sc_t0_delta']= pd.to_datetime(all_sc_and_t0['charttime']) -pd.to_datetime(all_sc_and_t0['t_0'])
# all_sc_and_t0['sc_admit_delta']= pd.to_datetime(all_sc_and_t0['charttime']) -pd.to_datetime(all_sc_and_t0['ICU_admit'])
# all_sc_and_t0['t_0_admit_delta']= pd.to_datetime(all_sc_and_t0['t_0']) -pd.to_datetime(all_sc_and_t0['ICU_admit'])
# all_sc_and_t0['sc_admit_delta'].describe(percentiles=[0.25,0.5,0.6,0.75,0.9]) #the time difference between sterile culture and ICU admission time
# ##the minimum value on this is perplexing,
# ##it suggests the one patient had their first sc positive culture done about 36 hours PRIOR to being admitted to icu.
# #this patient had the longest time between their SC and being admitted to ICU. one of the few cases where sc and t_0 started before ICU admit.
# all_sc_and_t0[all_sc_and_t0['sc_admit_delta']=='-2 days +12:47:05'] #hadm_id 154930
# all_sc_and_t0[all_sc_and_t0['hadm_id']==154930]
# #how many cases where sc and t_0 started before ICU admit?
# #all_sc_and_t0['t_0_admit_delta']<pd.Timedelta(0)
# #all_sc_and_t0[(all_sc_and_t0['sc_admit_delta']<pd.Timedelta(0))]
# c1=(all_sc_and_t0['sc_admit_delta']<pd.Timedelta(0))
# c2=(all_sc_and_t0['t_0_admit_delta']<pd.Timedelta(0))
# all_sc_and_t0[(c1 & c2)]['hadm_id'].nunique()
# #322/1503 patients where both sc and t_0 started prior to ICU admission... damn... is this a problem?
# sterile_all_pt_info[sterile_all_pt_info['hadm_id']==154930].drop_duplicates(['hadm_id','culture_charttime','ICU_admit' ])[[
# 'hadm_id','culture_charttime','first_wardid','ICU_admit' ,'hosp_admit', 'admission_location','admission_type',]]
# all_sc_and_t0['sc_t0_delta'].describe() #about 1/2 of the patients got their bacterial cutlure
# ## average is 1.5hours after culture ab were started (but rounding error makes this approx)
#average time between
#all_sc_and_t0['t_0_admit_delta'].describe(percentiles=[0.25, 0.5, 0.75, 0.9]) #the time difference between t_0 and ICU admission time
## looks like lowest delta suggests the earliest t_0 occured 12 hours before admission time (though again this is likely lower due to rounding).
## average is 10.5 hours after admission ab were started
```
##need to collapse to first positive ssc
###NOTE, I WILL NEED TO COME BACK TO THIS IN THE FUTURE TO PRIORITIZE GRAM + IF THERE ARE 2 POSITIVE SSC
#### few concerns:
* about 1/2 of the patients got their first positive bacterial culture performed prior to the t_0 date.
* this could be a big deal if it happened too far BEFORE t_0.
* in about 60% of icustay_id's, the sterile culture occurred before they were admitted to the ICU.
* similarly: 322/1503 patients had both their first positive ssc and t_0 (ab start date) occur prior to ICU admission (within 24 hours)... these are likely transfer patients
* note: the t_0 date is a date, and not a time, so this limits how accurate we can be about the ab start date comparison. However, admission time and culture time have time resolution.
* patients may have more than 1 positive culture within time window, should I prioritize certain ones over others?
* some patients may have been transfered in
```
##looking at first pos sc
# #final_pts_window_pos_sc.head()
# first_pos_sc= final_pts_window_pos_sc.groupby('hadm_id')['charttime'].agg(['min']).rename(columns={'min': 'first_pos_ssc'}).reset_index()
# #pd.Dafirst_pos_sc
# test_df= pd.merge(final_pt_df3,first_pos_sc)
# #test_df.head()
# delta= test_df['first_pos_ssc']-test_df['t_0_sc'] #
# delta[delta!='0 days 00:00:00'].describe()
# delta.describe()
# ##all info for first pos ssc
# ##need to collapse to first positive ssc
# ###NOTE, I WILL NEED TO COME BACK TO THIS IN THE FUTURE TO PRIORITIZE GRAM + IF THERE ARE 2 POSITIVE SSC
# ###there's a few concerns:
# ###1:
# first_sc_and_t0= all_sc_and_t0.groupby('hadm_id', as_index=False)['charttime'].agg(['min']).rename(columns={'min': 'first_pos_ssc'}).reset_index()
# first_sc_and_t0 =pd.merge(first_sc_and_t0,
# all_sc_and_t0,
# left_on=['hadm_id','first_pos_ssc'],
# right_on=['hadm_id','charttime'])
```
### stats for first positive ssc
```
# ###stats for first positive sc.
# first_sc_and_t0['sc_t0_delta'].describe()
#do i need to grab clinical data in previous project between icu admit and t_0 +72?
##no
# first_sc_and_t0[first_sc_and_t0['t_0_admit_delta']>pd.Timedelta('3 day')] #89
# ##blood cultures rather than all ssc are for cleaner dataset.
# first_sc_and_t0
# #look for first positive blood culture AFTER ICU ADMISSION.
# all_sc_and_t0_icu=all_sc_and_t0[(all_sc_and_t0['charttime']- all_sc_and_t0['ICU_admit'])>pd.Timedelta('0 day')]
# #1700 -> 1116 rows if looking at only positive cultures within ICU.
# #find clinical data 12-24 hours after the first positive blood culture in icu.
# #need to dig through clinical data and see which variables i may need to extract.
# ##find the avg time between icu admit and first positive blood culture in icu being obtained.
# (all_sc_and_t0_icu['charttime']- all_sc_and_t0_icu['ICU_admit']).describe()
# ##for those who have blood culture obtained, for those who are pos what kind, and if not then -.
# ##clinical use case: we know that some ppl have positive infections,
# ##and for those who would like to taior their ab to targeted towards type of infection.
# ##if they ar epositive, what is the likelyhood of the bacteria being positive?
# ## bayesian hierarchial model: first model +/- (multilevel prediction model)
# ##second model: for pos gram+ or gram -.
```
# cohort generated above
dr luo request:
It would also be good to add the start and end time for the antibiotics. --DONE
Dr. Nelson request:
1)provide an extra column with the DOD. --DONE
2)one with the first positive culture results (the actual value, as in “Enteroccous Faecalis”, etc.) for those with positive cultures,
3)the type of culture (eg. urine, blood, etc.).
```
# #5/15/18 to do, regenerate this for our cohort to get count of sterile site cultures.
# #breakdown the 1345 pts cultures,
# #if a patient has multiple culture types within 24 hr peroid, would be nice to know which are +/-.
# #more informative to know if pt was ever pos for bloodculture.
# #Look at distribution of all cultures within the 24 hour mark.
# %pylab inline
# pylab.rcParams['figure.figsize'] = (10, 6)
# sterile_test_df.spec_type_desc.value_counts().head(20).sort_values(ascending=True).plot.barh()
# #sc maths
# pa2_final_pt= final_pt_df2[final_pt_df2['sc_result']=="positive"]
# #need to be sure i have currently listd the first positive ssc date within t_0 to t_72 window.
# ##don't need to collect additional data prior to t_0 for ongoing project.
# pa2_final_pt
```
# compiling the dataframes into csv's
```
# Export the final cohort dataframe (machine-specific absolute path).
date='25012019'
pd.DataFrame(final_pt_df2).to_csv("/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_final_pt_PA_df2.csv" %date) #final cohort database n=11493 subject_id’s (7/6/18)
```
style guide stuff:
79 characters is longest a line should be, this is 79 chars:
Address the unique rows issue in the sterile culture. Need to use this to filter down the staph.
```
# Distribution of the delay between ICU admission and antibiotic start (t_0)
# across the final cohort.
#final_pt_df2
(pd.to_datetime(final_pt_df2['t_0'])-pd.to_datetime(final_pt_df2['ICU_admit'])).describe()
```
# may need to remove these 5% of patients:
these patients were likely not admitted to the ICU with suspicion of infection, but rather developed it after t_0
```
# final_pt_df2['admit_vs_t_0']= (pd.to_datetime(final_pt_df2['t_0'])-pd.to_datetime(final_pt_df2['ICU_admit']))
# final_pt_df2[final_pt_df2['admit_vs_t_0']>pd.Timedelta('4 day')]
# #535
# len(final_pt_df2)
# 812/15207
# final_pt_df2['admit_vs_t_0_td']=final_pt_df2['admit_vs_t_0'].astype(np.timedelta64)
# final_pt_df2.sort_values('admit_vs_t_0_td')
# final_pt_df2[final_pt_df2['admit_vs_t_0_td']<-1e+14].sort_values('admit_vs_t_0_td')['subject_id'].nunique() #3519/11488 patients have a delta of greater than 2 days of their icustay
```
| github_jupyter |
## 1. Google Play Store apps and reviews
<p>Mobile apps are everywhere. They are easy to create and can be lucrative. Because of these two factors, more and more apps are being developed. In this notebook, we will do a comprehensive analysis of the Android app market by comparing over ten thousand apps in Google Play across different categories. We'll look for insights in the data to devise strategies to drive growth and retention.</p>
<p><img src="https://assets.datacamp.com/production/project_619/img/google_play_store.png" alt="Google Play logo"></p>
<p>Let's take a look at the data, which consists of two files:</p>
<ul>
<li><code>apps.csv</code>: contains all the details of the applications on Google Play. There are 13 features that describe a given app.</li>
<li><code>user_reviews.csv</code>: contains 100 reviews for each app, <a href="https://www.androidpolice.com/2019/01/21/google-play-stores-redesigned-ratings-and-reviews-section-lets-you-easily-filter-by-star-rating/">most helpful first</a>. The text in each review has been pre-processed and attributed with three new features: Sentiment (Positive, Negative or Neutral), Sentiment Polarity and Sentiment Subjectivity.</li>
</ul>
```
# Read in the raw dataset
import pandas as pd
apps_with_duplicates = pd.read_csv("datasets/apps.csv")
# Drop duplicate rows from apps_with_duplicates
apps = apps_with_duplicates.drop_duplicates()
# Print the total number of apps (count of non-null 'App' entries)
print('Total number of apps in the dataset = ', apps['App'].count())
# Peek at the data; note head() shows the first 5 rows, not a random sample
apps.head()
```
## 2. Data cleaning
<p>Data cleaning is one of the most essential subtasks in any data science project. Although it can be a very tedious process, its worth should never be underestimated.</p>
<p>By looking at a random sample of the dataset rows (from the above task), we observe that some entries in the columns like <code>Installs</code> and <code>Price</code> have a few special characters (<code>+</code> <code>,</code> <code>$</code>) due to the way the numbers have been represented. This prevents the columns from being purely numeric, making it difficult to use them in subsequent future mathematical calculations. Ideally, as their names suggest, we would want these columns to contain only digits from [0-9].</p>
<p>Hence, we now proceed to clean our data. Specifically, the special characters <code>,</code> and <code>+</code> present in <code>Installs</code> column and <code>$</code> present in <code>Price</code> column need to be removed.</p>
<p>It is also always a good practice to print a summary of your dataframe after completing data cleaning. We will use the <code>info()</code> method to achieve this.</p>
```
# List of characters to remove from the affected columns
chars_to_remove = ['+', ',', '$']
# List of column names to clean
cols_to_clean = ['Installs', 'Price']
# Loop for each column in cols_to_clean
for col in cols_to_clean:
    # Loop for each char in chars_to_remove
    for char in chars_to_remove:
        # Replace the character with an empty string
        # NOTE(review): x.replace assumes every cell is a str; a NaN/float
        # cell would raise AttributeError here - confirm upstream data.
        apps[col] = apps[col].apply(lambda x: x.replace(char, ''))
# Print a summary of the cleaned apps dataframe
print(apps.info())
```
## 3. Correcting data types
<p>From the previous task we noticed that <code>Installs</code> and <code>Price</code> were categorized as <code>object</code> data type (and not <code>int</code> or <code>float</code>) as we would like. This is because these two columns originally had mixed input types: digits and special characters. To know more about Pandas data types, read <a href="https://datacarpentry.org/python-ecology-lesson/04-data-types-and-format/">this</a>.</p>
<p>The four features that we will be working with most frequently henceforth are <code>Installs</code>, <code>Size</code>, <code>Rating</code> and <code>Price</code>. While <code>Size</code> and <code>Rating</code> are both <code>float</code> (i.e. purely numerical data types), we still need to work on <code>Installs</code> and <code>Price</code> to make them numeric.</p>
```
import numpy as np
# Convert Installs to float data type (possible now that '+' and ',' were removed)
apps['Installs'] = apps['Installs'].astype(float)
# Convert Price to float data type (possible now that '$' was removed)
apps['Price'] = apps['Price'].astype(float)
# Checking dtypes of the apps dataframe
print(apps.info())
```
## 4. Exploring app categories
<p>With more than 1 billion active users in 190 countries around the world, Google Play continues to be an important distribution platform to build a global audience. For businesses to get their apps in front of users, it's important to make them more quickly and easily discoverable on Google Play. To improve the overall search experience, Google has introduced the concept of grouping apps into categories.</p>
<p>This brings us to the following questions:</p>
<ul>
<li>Which category has the highest share of (active) apps in the market? </li>
<li>Is any specific category dominating the market?</li>
<li>Which categories have the fewest number of apps?</li>
</ul>
<p>We will see that there are <code>33</code> unique app categories present in our dataset. <em>Family</em> and <em>Game</em> apps have the highest market prevalence. Interestingly, <em>Tools</em>, <em>Business</em> and <em>Medical</em> apps are also at the top.</p>
```
import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.graph_objs as go

# Print the total number of unique categories
num_categories = len(apps['Category'].unique())
print('Number of categories = ', num_categories)

# Count the number of apps in each 'Category'.
num_apps_in_category = apps['Category'].value_counts()

# Sort num_apps_in_category in descending order based on the count of apps
# in each category.
# Fix: the original called sort_values() with the default ascending=True
# (contradicting its own comment) and then plotted the UNSORTED series,
# leaving sorted_num_apps_in_category unused.
sorted_num_apps_in_category = num_apps_in_category.sort_values(ascending=False)

data = [go.Bar(
    x = sorted_num_apps_in_category.index,   # index = category name
    y = sorted_num_apps_in_category.values,  # value = count
)]

plotly.offline.iplot(data)
```
## 5. Distribution of app ratings
<p>After having witnessed the market share for each category of apps, let's see how all these apps perform on an average. App ratings (on a scale of 1 to 5) impact the discoverability, conversion of apps as well as the company's overall brand image. Ratings are a key performance indicator of an app.</p>
<p>From our research, we found that the average rating across all app categories is <code>4.17</code>. The histogram plot is skewed to the left, indicating that the majority of the apps are highly rated, with only a few exceptions among the low-rated apps.</p>
```
# Mean rating across all apps in the dataset
avg_app_rating = apps['Rating'].mean()
print('Average app rating = ', avg_app_rating)

# Histogram of the rating distribution
data = [go.Histogram(x = apps['Rating'])]

# Dashed vertical marker placed at the mean rating
mean_marker = {
    'type': 'line',
    'x0': avg_app_rating,
    'y0': 0,
    'x1': avg_app_rating,
    'y1': 1000,
    'line': {'dash': 'dashdot'},
}
layout = {'shapes': [mean_marker]}

plotly.offline.iplot({'data': data, 'layout': layout})
```
## 6. Size and price of an app
<p>Let's now examine app size and app price. For size, if the mobile app is too large, it may be difficult and/or expensive for users to download. Lengthy download times could turn users off before they even experience your mobile app. Plus, each user's device has a finite amount of disk space. For price, some users expect their apps to be free or inexpensive. These problems compound if the developing world is part of your target market; especially due to internet speeds, earning power and exchange rates.</p>
<p>How can we effectively come up with strategies to size and price our app?</p>
<ul>
<li>Does the size of an app affect its rating? </li>
<li>Do users really care about system-heavy apps or do they prefer light-weighted apps? </li>
<li>Does the price of an app affect its rating? </li>
<li>Do users always prefer free apps over paid apps?</li>
</ul>
<p>We find that the majority of top rated apps (rating over 4) range from 2 MB to 20 MB. We also find that the vast majority of apps price themselves under \$10.</p>
```
%matplotlib inline
import seaborn as sns
sns.set_style("darkgrid")
import warnings
warnings.filterwarnings("ignore")
# Select rows where both 'Rating' and 'Size' values are present (ie. the two values are not null)
apps_with_size_and_rating_present = apps[(~apps['Rating'].isnull()) & (~apps['Size'].isnull())]
# Subset for categories with at least 250 apps
large_categories = apps_with_size_and_rating_present.groupby(['Category']).filter(lambda x: len(x) >= 250)
# Plot size vs. rating
plt1 = sns.jointplot(x = large_categories['Size'], y = large_categories['Rating'])
# Select apps whose 'Type' is 'Paid'
paid_apps = apps_with_size_and_rating_present[apps_with_size_and_rating_present['Type'] == 'Paid']
# Plot price vs. rating
plt2 = sns.jointplot(x = paid_apps['Price'], y = paid_apps['Rating'])
```
## 7. Relation between app category and app price
<p>So now comes the hard part. How are companies and developers supposed to make ends meet? What monetization strategies can companies use to maximize profit? The costs of apps are largely based on features, complexity, and platform.</p>
<p>There are many factors to consider when selecting the right pricing strategy for your mobile app. It is important to consider the willingness of your customer to pay for your app. A wrong price could break the deal before the download even happens. Potential customers could be turned off by what they perceive to be a shocking cost, or they might delete an app they’ve downloaded after receiving too many ads or simply not getting their money's worth.</p>
<p>Different categories demand different price ranges. Some apps that are simple and used daily, like the calculator app, should probably be kept free. However, it would make sense to charge for a highly-specialized medical app that diagnoses diabetic patients. Below, we see that <em>Medical and Family</em> apps are the most expensive. Some medical apps extend even up to \$80! All game apps are reasonably priced below \$20.</p>
```
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
fig.set_size_inches(15, 8)

# Focus on a handful of popular, well-populated categories
selected_categories = ['GAME', 'FAMILY', 'PHOTOGRAPHY', 'MEDICAL',
                       'TOOLS', 'FINANCE', 'LIFESTYLE', 'BUSINESS']
popular_app_cats = apps[apps.Category.isin(selected_categories)]

# Strip plot of price per category to expose each category's price spread
ax = sns.stripplot(x = popular_app_cats['Price'],
                   y = popular_app_cats['Category'],
                   jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories')

# Inspect the suspiciously expensive apps priced above $200
apps_above_200 = apps[apps['Price'] > 200]
apps_above_200[['Category', 'App', 'Price']]
```
## 8. Filter out "junk" apps
<p>It looks like a bunch of the really expensive apps are "junk" apps. That is, apps that don't really have a purpose. Some app developer may create an app called <em>I Am Rich Premium</em> or <em>most expensive app (H)</em> just for a joke or to test their app development skills. Some developers even do this with malicious intent and try to make money by hoping people accidentally click purchase on their app in the store.</p>
<p>Let's filter out these junk apps and re-do our visualization.</p>
```
# Drop the joke/"junk" listings by keeping only apps priced below $100
apps_under_100 = popular_app_cats[popular_app_cats['Price'] < 100]

fig, ax = plt.subplots()
fig.set_size_inches(15, 8)

# Re-plot price vs. category on the cleaned subset
ax = sns.stripplot(data = apps_under_100, x = 'Price', y = 'Category',
                   jitter = True, linewidth = 1)
ax.set_title('App pricing trend across categories after filtering for junk apps')
```
## 9. Popularity of paid apps vs free apps
<p>For apps in the Play Store today, there are five types of pricing strategies: free, freemium, paid, paymium, and subscription. Let's focus on free and paid apps only. Some characteristics of free apps are:</p>
<ul>
<li>Free to download.</li>
<li>Main source of income often comes from advertisements.</li>
<li>Often created by companies that have other products and the app serves as an extension of those products.</li>
<li>Can serve as a tool for customer retention, communication, and customer service.</li>
</ul>
<p>Some characteristics of paid apps are:</p>
<ul>
<li>Users are asked to pay once for the app to download and use it.</li>
<li>The user can't really get a feel for the app before buying it.</li>
</ul>
<p>Are paid apps installed as much as free apps? It turns out that paid apps have a relatively lower number of installs than free apps, though the difference is not as stark as I would have expected!</p>
```
def _installs_box(app_type):
    # One box trace of install counts for the given pricing type
    return go.Box(y = apps[apps['Type'] == app_type]['Installs'],
                  name = app_type)

trace0 = _installs_box('Paid')
trace1 = _installs_box('Free')

# Installs span many orders of magnitude, so use a log y-axis
layout = go.Layout(
    title = "Number of downloads of paid apps vs. free apps",
    yaxis = dict(title = "Log number of downloads",
                 type = 'log',
                 autorange = True)
)

data = [trace0, trace1]
plotly.offline.iplot({'data': data, 'layout': layout})
```
## 10. Sentiment analysis of user reviews
<p>Mining user review data to determine how people feel about your product, brand, or service can be done using a technique called sentiment analysis. User reviews for apps can be analyzed to identify if the mood is positive, negative or neutral about that app. For example, positive words in an app review might include words such as 'amazing', 'friendly', 'good', 'great', and 'love'. Negative words might be words like 'malware', 'hate', 'problem', 'refund', and 'incompetent'.</p>
<p>By plotting sentiment polarity scores of user reviews for paid and free apps, we observe that free apps receive a lot of harsh comments, as indicated by the outliers on the negative y-axis. Reviews for paid apps appear never to be extremely negative. This may indicate something about app quality, i.e., paid apps being of higher quality than free apps on average. The median polarity score for paid apps is a little higher than free apps, thereby syncing with our previous observation.</p>
<p>In this notebook, we analyzed over ten thousand apps from the Google Play Store. We can use our findings to inform our decisions should we ever wish to create an app ourselves.</p>
```
# Load the user reviews and join them onto the apps table
# (merge keys default to the columns the two frames share)
reviews_df = pd.read_csv('datasets/user_reviews.csv')
merged_df = apps.merge(reviews_df)

# Keep only rows that actually carry a review and a sentiment label
merged_df = merged_df.dropna(subset = ['Sentiment', 'Review'])

sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)

# Review sentiment polarity, split by app pricing type
ax = sns.boxplot(data = merged_df, x = 'Type', y = 'Sentiment_Polarity')
ax.set_title('Sentiment Polarity Distribution')
```
| github_jupyter |
# Polymer data
Huan Tran, Georgia Institute of Technology, huan.tran@mse.gatech.edu
This notebook provides scripts used to train four datasets provided by Polymer Genome (PG, https://www.polymergenome.org/), including the (DFT) HSE band gap, the (electronic and ionic) dielectric constants, and the atomization energy of about 380 organic polymers. Polymer crystals in each dataset were fingerprinted at 3 levels of atomic-level fingerprints, i.e., singles, doubles, and triples (from lower to higher length scales). The three fingerprinted datasets are then named "**fp_aS.csv**", "**fp_aD.csv**", and "**fp_aT.csv**". Given the nature of the physical properties demonstrated here, these atomic-level fingerprints (described below) are sufficient; there is no need to use fingerprints of longer length scales. Data files are in csv format, with each polymer on one line. The *first column* is the polymer id, the *next four columns* are the atomization energy, band gap, and electronic and ionic dielectric constants, and the *remaining columns* are the fingerprints.
Details on the data curation are given in *[Huan et al., Sci. Data **3**, 160012 (2016)]*. This includes how to obtain the polymer crystal structures, what level of DFT was used for the computations, to what extent the data is validated, and so on. The atomic fragment-based fingerprints are described in *[Huan et al., Phys. Rev. B **92**, 014106 (2015)]*, in which the motivation, the definition, and the relations of the fingerprints are described. In short, these fingerprints capture how many fragments of a given type show up in the polymer. In the figure below, C2 is **a single**, representing a C atom with two bonds, and O2 is another **single**, representing an O atom with two bonds. A **double** or **triple** contains two or three singles in a given order. From the chemistry, more information can be readily extracted. For example, two bonds of a C2 must be double bonds, while for C3, two are single bonds and the other is a double bond.

The scripts provided below will train the Gaussian Process Regression (GPR) models on the provided data. GPR is used in PG for some reasons, i.e., it is quite intuitive and a measure of uncertainty can be obtained. One needs to update the data file name in this notebook to make the model of interest. In general, it is expected that models based on higher levels of atomic fingerprints will be better than those based on lower levels.
Materials used for this hackathon are the results of several polymer-related projects pursued in our research group, led by Prof. Rampi Ramprasad at Georgia Institute of Technology. Of these projects, Polymer Genome aims at developing an informatics platform for polymer prediction and design. Data, the most essential component of PG, is currently in a significantly expanding phase with support from Toyota Research Institute.
*NOTICE: All information contained herein is, and remains the property of Georgia Tech Research Corporation and its sponsors, if any. The intellectual and technical concepts contained herein are proprietary to Georgia Tech Research Corporation and its sponsors and may be covered by U.S. and Foreign Patents, patents in process, and are protected by trade secret or copyright law. Dissemination of this information or reproduction of this material is strictly forbidden unless prior written permission is obtained from Georgia Tech Research Corporation.*
```
# Modules used throughout this notebook
import numpy as np
import pandas as pd
import boto3
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import mean_squared_error
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF
from tqdm import tqdm


def hook(t):
    # Build a transfer callback that advances progress bar *t* by the
    # number of bytes S3 reports as transferred.
    def inner(bytes_amount):
        t.update(bytes_amount)
    return inner


#session = boto3.session.Session(profile_name='hack2020')
#s3 = session.client('s3')
s3 = boto3.client('s3')

# Fetch the three fingerprint files, with a byte-level progress bar each
for polymer_csv in ['fp_aS.csv', 'fp_aD.csv', 'fp_aT.csv']:
    key = 'data/' + polymer_csv
    filesize = boto3.resource('s3').Object('hackathon2020-prod', key).content_length
    with tqdm(total=filesize, unit='B', unit_scale=True, desc=polymer_csv) as t:
        s3.download_file('hackathon2020-prod', key, polymer_csv, Callback=hook(t))
# Read the fingerprinted dataset; one polymer per row
df = pd.read_csv('fp_aT.csv', delimiter=',', header=0)
data_tot = np.array(df)

# Select the property to model.  For each choice we drop the id column (0)
# and the three property columns that are NOT selected, leaving
# [selected property, fingerprints...].
prop_sel = "band gap"
_cols_to_drop = {
    "band gap":              [0, 2, 3, 4],
    "atomization energy":    [0, 1, 3, 4],
    "electronic dielectric": [0, 1, 2, 4],
    "ionic dielectric":      [0, 1, 2, 3],
}
data_sel = np.delete(data_tot, _cols_to_drop[prop_sel], axis=1)

# Discard polymers whose property value is missing (NaN)
data_sel_nonan = data_sel[~np.isnan(data_sel).any(axis=1)]

# Column 0 is the target property Y; the rest are the fingerprint X
X = data_sel_nonan[:, 1:]
Y = data_sel_nonan[:, 0]
#print (np.shape(data_sel))
#print (np.shape(data_sel_nonan))

# Hold out 20% of the polymers as the test set (seeded for reproducibility)
test_size = 0.20
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = test_size, random_state=1)
# Heuristic starting points for the hyperparameter search: centre the
# white-noise scale on the standard deviation of Y and let it move one
# order of magnitude in either direction.
Y_average = np.average(Y)
noise_avr = np.std(Y)
noise_lb = noise_avr / 10
noise_ub = noise_avr * 10
n_fold = 5

# GPR prior: scaled RBF kernel plus a bounded white-noise term
kernel = (Y_average) ** 2 * RBF(length_scale=1) + WhiteKernel(
    noise_level=noise_avr ** 2,
    noise_level_bounds=(noise_lb ** 2, noise_ub ** 2))
gp = GaussianProcessRegressor(kernel=kernel, alpha=0, n_restarts_optimizer=5)

#Three hyper parameters of a GPR can be seen by uncommenting and executing the following line
#for hyperparameter in kernel.hyperparameters: print(hyperparameter)
# Train the GPR model: pick the kernel whose held-out RMSE is lowest
# across n_fold cross-validation folds of the training set.
opt_gp = gp
opt_rmse = 1.0E20   # best (lowest) validation RMSE seen so far
ncv_opt = 0

# Training set split into n_fold subsets (note: shuffle has no fixed
# seed here, so fold membership varies between runs)
kf_ = KFold(n_splits=n_fold, shuffle = True)
kf = kf_.split(Y_train)

# Loop for the best kernel; enumerate replaces the hand-maintained
# `ncv` counter of the original cell.
for ncv, (train, test) in enumerate(kf):
    X_cv_train = X_train[train]
    X_cv_test = X_train[test]
    Y_cv_train = Y_train[train]
    Y_cv_test = Y_train[test]
    gp = GaussianProcessRegressor(kernel=kernel, alpha=0, n_restarts_optimizer=10)
    gp.fit(X_cv_train, Y_cv_train)
    y_cv_train = gp.predict(X_cv_train, return_std=False)
    y_cv_test = gp.predict(X_cv_test, return_std=False)
    rmse_cv_train = np.sqrt(mean_squared_error(Y_cv_train, y_cv_train))
    rmse_cv_test = np.sqrt(mean_squared_error(Y_cv_test, y_cv_test))
    print(' ncv, rmse_train, rmse_test: ', ncv, rmse_cv_train, rmse_cv_test)
    # Keep the fold whose validation RMSE is the best so far
    if rmse_cv_test < opt_rmse:
        opt_rmse = rmse_cv_test
        opt_gp = gp
        ncv_opt = ncv
print(' Optimal ncv: ', ncv_opt, "; optimal kernel saved.")
# Return to the initial train/test split for the final model
X_train_final = X_train
X_test_final = X_test

# optimizer=None freezes the hyperparameters at the values found by CV
# (opt_gp.kernel_); fit() then only recomputes the posterior on the
# full training set.
gp_final = GaussianProcessRegressor(kernel=opt_gp.kernel_, alpha=0, optimizer=None)
gp_final.fit(X_train_final, Y_train)

# Predictions on both splits
y_train = gp_final.predict(X_train_final, return_std=False)
y_test = gp_final.predict(X_test_final, return_std=False)


def _rmse(actual, predicted):
    # Root-mean-square error between observed and predicted values
    return np.sqrt(mean_squared_error(actual, predicted))


# Error measures: RMSE and R^2 on train and test sets
rmse_train = _rmse(Y_train, y_train)
rmse_test = _rmse(Y_test, y_test)
R2_train_ = gp_final.score(X_train_final, Y_train)
R2_test_ = gp_final.score(X_test_final, Y_test)

# Three optimal hyperparameters can be obtained by the following lines
#print ("k1.k1.constant_value = " + str(gp_final.kernel_.k1.k1.constant_value))
#print ("k2.noise_level = " + str(gp_final.kernel_.k2.noise_level))
#print ("k2.k2.length_scale = " + str(gp_final.kernel_.k1.k2.length_scale))
# Visualize the prediction
%matplotlib inline
import matplotlib.pyplot as plt
train_size = 1.0-test_size
label_train = 'Train: size = ' + str(train_size) +'; R2 = ' + str('%.3f' % R2_train_) + '; rmse = ' + str(
'%.3f' % rmse_train)
label_test = 'Test: size = ' + str(test_size) + '; R2 = ' + str('%.3f' % R2_test_) + '; rmse = ' + str(
'%.3f' % rmse_test)
plt.figure(figsize=(8, 8))
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
lim_min = min(min(Y_train), min(Y_test), min(y_train), min(y_test))
lim_max = max(max(Y_train), max(Y_test), max(y_train), max(y_test))
lim = [lim_min - (lim_max - lim_min) * 0.1, lim_max + (lim_max - lim_min) * 0.1]
plt.xlim(lim)
plt.ylim(lim)
plt.text(lim_min + (lim_max - lim_min) * 0.4, lim_min + (lim_max - lim_min) * 0.1, label_train)
plt.text(lim_min + (lim_max - lim_min) * 0.4, lim_min + (lim_max - lim_min) * 0.05, label_test)
if prop_sel == "band gap":
plt.xlabel("Computed band gap (eV)", size=17)
plt.ylabel("Predicted band gap (eV)", size=17)
elif prop_sel == "atomization energy":
plt.xlabel("Computed atomization energy (eV/atom)", size=17)
plt.ylabel("Predicted atomization energy (eV/atom)", size=17)
elif prop_sel == "electronic dielectric":
plt.xlabel("Computed electronic dielectric", size=17)
plt.ylabel("Predicted electronic dielectric", size=17)
elif prop_sel == "ionic dielectric":
plt.xlabel("Computed ionic dielectric", size=17)
plt.ylabel("Predicted ionic dielectric", size=17)
plots_ = list()
plot_train = plt.scatter(Y_train, y_train, marker='o', label="train set")
plots_.append(plot_train)
plt.plot([Y_test, Y_test], [y_test, y_test])
plot_test = plt.scatter(Y_test, y_test, marker='s', label="test set")
plots_.append(plot_test)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.